/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "r600d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_R600		"radeon/R600_uvd.bin"
#define FIRMWARE_RS780		"radeon/RS780_uvd.bin"
#define FIRMWARE_RV770		"radeon/RV770_uvd.bin"
#define FIRMWARE_RV710		"radeon/RV710_uvd.bin"
#define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
#define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
#define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"
#define FIRMWARE_BONAIRE	"radeon/BONAIRE_uvd.bin"

MODULE_FIRMWARE(FIRMWARE_R600);
MODULE_FIRMWARE(FIRMWARE_RS780);
MODULE_FIRMWARE(FIRMWARE_RV770);
MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_BONAIRE);

static void radeon_uvd_idle_work_handler(struct work_struct *work);

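/**
 * radeon_uvd_init - init the UVD block, load firmware and allocate the VCPU BO
 *
 * @rdev: radeon_device pointer
 *
 * Pick and request the UVD firmware image for the current chip family,
 * allocate and pin the VCPU buffer object in VRAM, map it for CPU access
 * and clear the session handle bookkeeping.
 * Returns 0 on success, negative error code on failure.
 */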
int radeon_uvd_init(struct radeon_device *rdev)
{
	unsigned long bo_size;
	const char *fw_name;
	int i, r;

	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV670:
	case CHIP_RV620:
	case CHIP_RV635:
		fw_name = FIRMWARE_R600;
		break;

	case CHIP_RS780:
	case CHIP_RS880:
		fw_name = FIRMWARE_RS780;
		break;

	case CHIP_RV770:
		fw_name = FIRMWARE_RV770;
		break;

	case CHIP_RV710:
	case CHIP_RV730:
	case CHIP_RV740:
		fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
		fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_CAYMAN:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
	case CHIP_VERDE:
	case CHIP_PITCAIRN:
	case CHIP_ARUBA:
	case CHIP_OLAND:
		fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
		  RADEON_GPU_PAGE_SIZE;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
		rdev->uvd.img_size[i] = 0;
	}

	return 0;
}

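/**
 * radeon_uvd_fini - tear down the UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Unmap, unpin and free the VCPU buffer object, tear down the UVD ring
 * and release the firmware image.
 */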
void radeon_uvd_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	}

	radeon_bo_unref(&rdev->uvd.vcpu_bo);

	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);

	release_firmware(rdev->uvd_fw);
}

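/**
 * radeon_uvd_suspend - prepare UVD for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Send a destroy message for every session handle that is still open and
 * wait for the resulting fences, so no stream is left running across
 * suspend.
 */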
int radeon_uvd_suspend(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0) {
			struct radeon_fence *fence;

			radeon_uvd_note_usage(rdev);

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}

	return 0;
}

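/**
 * radeon_uvd_resume - re-init UVD after resume
 *
 * @rdev: radeon_device pointer
 *
 * Copy the firmware image back into the start of the VCPU buffer object
 * and clear the stack/heap area behind it.
 */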
int radeon_uvd_resume(struct radeon_device *rdev)
{
	unsigned size;
	void *ptr;

	if (rdev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->size;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->size;

	memset(ptr, 0, size);

	return 0;
}

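/**
 * radeon_uvd_force_into_uvd_segment - restrict BO placement for UVD
 *
 * @rbo: buffer object to restrict
 * @allowed_domains: domains the caller allows for this BO
 *
 * UVD can only address buffers inside a 256MB segment, so clamp all
 * placements to the first 256MB. If the BO is not forced into VRAM and
 * has only a single placement, add the next 256MB window as an
 * alternative placement.
 */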
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
				       uint32_t allowed_domains)
{
	int i;

	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}

	/* If it must be in VRAM it must be in the first segment as well */
	if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
		return;

	/* abort if we already have more than one placement */
	if (rbo->placement.num_placement > 1)
		return;

	/* add another 256MB segment */
	rbo->placements[1] = rbo->placements[0];
	rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
	rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
	rbo->placement.num_placement++;
	rbo->placement.num_busy_placement++;
}

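/**
 * radeon_uvd_free_handles - free all handles owned by a file
 *
 * @rdev: radeon_device pointer
 * @filp: DRM file the handles belong to
 *
 * Send a destroy message for every handle still owned by @filp and clear
 * the bookkeeping, so a closing client does not leak UVD sessions.
 */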
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0 && rdev->uvd.filp[i] == filp) {
			struct radeon_fence *fence;

			radeon_uvd_note_usage(rdev);

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}
}

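/*
 * Sanity check a decode message: derive the minimum decoded picture buffer
 * (dpb) size for the given codec and resolution, make sure the sizes
 * announced in the message are large enough, and store the dpb and image
 * sizes as minimum buffer sizes for the later relocation checks.
 */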
static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);

	unsigned image_size, tmp, min_dpb_size;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */

		/* reference picture buffer */
		min_dpb_size = image_size * 17;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}

static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
				     unsigned stream_type)
{
	switch (stream_type) {
	case 0: /* H264 */
	case 1: /* VC1 */
		/* always supported */
		return 0;

	case 3: /* MPEG2 */
	case 4: /* MPEG4 */
		/* only since UVD 3 */
		if (p->rdev->family >= CHIP_PALM)
			return 0;

		/* fall through */
	default:
		DRM_ERROR("UVD codec not supported by hardware %d!\n",
			  stream_type);
		return -EINVAL;
	}
}

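/*
 * Parse a UVD message BO: wait for any pending write fence, map the BO and
 * dispatch on the message type (create / decode / destroy), allocating,
 * validating or freeing the session handle as required.
 */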
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
			     unsigned offset, unsigned buf_sizes[])
{
	int32_t *msg, msg_type, handle;
	unsigned img_size = 0;
	struct fence *f;
	void *ptr;

	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	f = reservation_object_get_excl(bo->tbo.resv);
	if (f) {
		r = radeon_fence_wait((struct radeon_fence *)f, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = radeon_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		img_size = msg[7] * msg[8];

		r = radeon_uvd_validate_codec(p, msg[4]);
		radeon_bo_kunmap(bo);
		if (r)
			return r;

		/* try to alloc a new handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
				p->rdev->uvd.filp[i] = p->filp;
				p->rdev->uvd.img_size[i] = img_size;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -EINVAL;

	case 1:
		/* it's a decode msg, validate codec and calc buffer sizes */
		r = radeon_uvd_validate_codec(p, msg[4]);
		if (!r)
			r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
		radeon_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
				if (p->rdev->uvd.filp[i] != p->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
		radeon_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}

	BUG();
	return -EINVAL;
}

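/*
 * Patch a GPCOM data0/data1 pair with the real GPU address of the reloc'd
 * buffer and check that the buffer is large enough, does not cross a 256MB
 * boundary and (for msg/fb buffers) lives in the same 256MB segment as the
 * VCPU BO. A cmd value of 0 marks the message buffer and triggers message
 * parsing.
 */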
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[], bool *has_msg_cmd)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_bo_list *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = p->chunk_relocs;
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d!\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = &p->relocs[(idx / 4)];
	start = reloc->gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;

	if (cmd < 0x4) {
		if (end <= start) {
			DRM_ERROR("invalid reloc offset %X!\n", offset);
			return -EINVAL;
		}
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if ((start >> 28) != ((end - 1) >> 28)) {
		DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
			  start, end);
		return -EINVAL;
	}

	/* TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		if (*has_msg_cmd) {
			DRM_ERROR("More than one message in a UVD-IB!\n");
			return -EINVAL;
		}
		*has_msg_cmd = true;
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	} else if (!*has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

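/*
 * Walk a type-0 packet: remember where the GPCOM data0/data1 values sit in
 * the IB and handle the command once UVD_GPCOM_VCPU_CMD is written.
 */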
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[],
			     bool *has_msg_cmd)
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1,
						buf_sizes, has_msg_cmd);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}
	return 0;
}

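/**
 * radeon_uvd_cs_parse - parse and validate a UVD command stream
 *
 * @p: parser state for the submitted IB
 *
 * Walk all packets of the IB, validate the referenced buffers against the
 * minimum sizes in buf_sizes and make sure exactly one message command is
 * present. Returns 0 on success, negative error code for invalid streams.
 */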
int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r, data0 = 0, data1 = 0;

	/* does the IB have a msg command? */
	bool has_msg_cmd = false;

	/* minimum buffer sizes */
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 32 * 1024 * 1024,
		[0x00000002] = 2048 * 1152 * 3,
		[0x00000003] = 2048,
	};

	if (p->chunk_ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  p->chunk_ib->length_dw);
		return -EINVAL;
	}

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk!\n");
		return -EINVAL;
	}

	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
					      buf_sizes, &has_msg_cmd);
			if (r)
				return r;
			break;
		case RADEON_PACKET_TYPE2:
			p->idx += pkt.count + 2;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
			return -EINVAL;
		}
	} while (p->idx < p->chunk_ib->length_dw);

	if (!has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

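/*
 * Build a small IB that writes the message address into the GPCOM registers
 * and fires the command, handing back the fence of the submitted IB.
 */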
static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, uint64_t addr,
			       struct radeon_fence **fence)
{
	struct radeon_ib ib;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
	if (r)
		return r;

	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	/* we use the last page of the vcpu bo for the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;

	uint32_t *msg = rdev->uvd.cpu_addr + offs;
	uint64_t addr = rdev->uvd.gpu_addr + offs;

	int r, i;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (r)
		return r;

	/* stitch together an UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return r;
}

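/*
 * Counterpart to radeon_uvd_get_create_msg(): emit a dummy destroy message
 * for the given handle and hand back the fence of the submitted IB.
 */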
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	/* we use the last page of the vcpu bo for the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;

	uint32_t *msg = rdev->uvd.cpu_addr + offs;
	uint64_t addr = rdev->uvd.gpu_addr + offs;

	int r, i;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (r)
		return r;

	/* stitch together an UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return r;
}

/**
 * radeon_uvd_count_handles - count number of open streams
 *
 * @rdev: radeon_device pointer
 * @sd: number of SD streams
 * @hd: number of HD streams
 *
 * Count the number of open SD/HD streams as a hint for power management
 */
static void radeon_uvd_count_handles(struct radeon_device *rdev,
				     unsigned *sd, unsigned *hd)
{
	unsigned i;

	*sd = 0;
	*hd = 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (!atomic_read(&rdev->uvd.handles[i]))
			continue;

		if (rdev->uvd.img_size[i] >= 720*576)
			++(*hd);
		else
			++(*sd);
	}
}

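/*
 * Delayed work handler: once no UVD fences are outstanding, drop the UVD
 * clocks (or tell DPM to power the block down); otherwise re-arm the work.
 */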
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, uvd.idle_work.work);

	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
						 &rdev->pm.dpm.hd);
			radeon_dpm_enable_uvd(rdev, false);
		} else {
			radeon_set_uvd_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

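/**
 * radeon_uvd_note_usage - bump clocks when UVD is used
 *
 * @rdev: radeon_device pointer
 *
 * (Re)schedule the idle work and, if the block was idle, raise the UVD
 * clocks or tell DPM to power the block up again.
 */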
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		unsigned hd = 0, sd = 0;
		radeon_uvd_count_handles(rdev, &sd, &hd);
		if ((rdev->pm.dpm.sd != sd) ||
		    (rdev->pm.dpm.hd != hd)) {
			rdev->pm.dpm.sd = sd;
			rdev->pm.dpm.hd = hd;
			/* disable this for now */
			/*streams_changed = true;*/
		}
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_uvd(rdev, true);
		} else {
			radeon_set_uvd_clocks(rdev, 53300, 40000);
		}
	}
}

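/*
 * Pick a post divider for the given VCO frequency so that the resulting
 * clock does not exceed target_freq, honoring the minimum divider and the
 * rule that dividers above pd_even must be even.
 */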
static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	/* adjust to post divider minimum value */
	if (post_div < pd_min)
		post_div = pd_min;

	/* we always need a frequency less than or equal to the target */
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;

	/* post dividers above a certain value must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}

/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range? */
		if (fb_div > fb_mask)
			break; /* it can only get worse */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

		/* determine if this vco setting is better than current optimal settings */
		if (score < optimal_score) {
			*optimal_fb_div = fb_div;
			*optimal_vclk_div = vclk_div;
			*optimal_dclk_div = dclk_div;
			optimal_score = score;
			if (optimal_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* did we find a valid setup? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}

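/**
 * radeon_uvd_send_upll_ctlreq - issue a UPLL control request
 *
 * @rdev: radeon_device pointer
 * @cg_upll_func_cntl: register offset of the CG_UPLL_FUNC_CNTL register
 *
 * Assert UPLL_CTLREQ, wait for both CTLACK bits to be set and deassert the
 * request again. Returns 0 on success, -ETIMEDOUT if the PLL never
 * acknowledges the request.
 */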
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}