/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "r600d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS     1000

/* Firmware Names */
#define FIRMWARE_RV710          "radeon/RV710_uvd.bin"
#define FIRMWARE_CYPRESS        "radeon/CYPRESS_uvd.bin"
#define FIRMWARE_SUMO           "radeon/SUMO_uvd.bin"
#define FIRMWARE_TAHITI         "radeon/TAHITI_uvd.bin"

MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);

static void radeon_uvd_idle_work_handler(struct work_struct *work);

int radeon_uvd_init(struct radeon_device *rdev)
{
        struct platform_device *pdev;
        unsigned long bo_size;
        const char *fw_name;
        int i, r;

        INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

        pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
        r = IS_ERR(pdev);
        if (r) {
                dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
                return -EINVAL;
        }

        switch (rdev->family) {
        case CHIP_RV710:
        case CHIP_RV730:
        case CHIP_RV740:
                fw_name = FIRMWARE_RV710;
                break;

        case CHIP_CYPRESS:
        case CHIP_HEMLOCK:
        case CHIP_JUNIPER:
        case CHIP_REDWOOD:
        case CHIP_CEDAR:
                fw_name = FIRMWARE_CYPRESS;
                break;

        case CHIP_SUMO:
        case CHIP_SUMO2:
        case CHIP_PALM:
        case CHIP_CAYMAN:
        case CHIP_BARTS:
        case CHIP_TURKS:
        case CHIP_CAICOS:
                fw_name = FIRMWARE_SUMO;
                break;

        case CHIP_TAHITI:
        case CHIP_VERDE:
        case CHIP_PITCAIRN:
        case CHIP_ARUBA:
                fw_name = FIRMWARE_TAHITI;
                break;

        default:
                return -EINVAL;
        }

        r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
        if (r) {
                dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
                        fw_name);
                platform_device_unregister(pdev);
                return r;
        }

        platform_device_unregister(pdev);

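        /*
         * Layout of the VCPU buffer object allocated below (derived from
         * the size calculation here and the memcpy() further down, not
         * from hardware documentation):
         *
         *   [firmware image + 8 bytes, GPU-page aligned][stack][heap]
         *
         * RADEON_UVD_STACK_SIZE and RADEON_UVD_HEAP_SIZE are defined in
         * radeon.h.
         */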
        bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
                  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
        r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
        if (r) {
                dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
                return r;
        }

        r = radeon_uvd_resume(rdev);
        if (r)
                return r;

        memset(rdev->uvd.cpu_addr, 0, bo_size);
        memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

        r = radeon_uvd_suspend(rdev);
        if (r)
                return r;

        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                atomic_set(&rdev->uvd.handles[i], 0);
                rdev->uvd.filp[i] = NULL;
        }

        return 0;
}

void radeon_uvd_fini(struct radeon_device *rdev)
{
        radeon_uvd_suspend(rdev);
        radeon_bo_unref(&rdev->uvd.vcpu_bo);
}

int radeon_uvd_suspend(struct radeon_device *rdev)
{
        int r;

        if (rdev->uvd.vcpu_bo == NULL)
                return 0;

        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
        if (!r) {
                radeon_bo_kunmap(rdev->uvd.vcpu_bo);
                radeon_bo_unpin(rdev->uvd.vcpu_bo);
                rdev->uvd.cpu_addr = NULL;
                if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
                        radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
                }
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);

                if (rdev->uvd.cpu_addr) {
                        radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
                } else {
                        rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
                }
        }
        return r;
}
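
/*
 * Note on the suspend path above: rather than simply unpinning the VCPU
 * bo, it is re-pinned into the CPU domain and re-mapped. The UVD fence
 * page lives inside this bo, so keeping a valid CPU mapping lets the
 * fence driver keep working while the GPU side is down; if the CPU-domain
 * pin fails, the fence driver's cpu_addr is cleared instead. This reading
 * follows from the code itself rather than from UVD documentation.
 */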

int radeon_uvd_resume(struct radeon_device *rdev)
{
        int r;

        if (rdev->uvd.vcpu_bo == NULL)
                return -EINVAL;

        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
        if (r) {
                radeon_bo_unref(&rdev->uvd.vcpu_bo);
                dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
                return r;
        }

        /* the bo was pinned into the CPU domain on suspend, unmap and unpin it first */
        radeon_bo_kunmap(rdev->uvd.vcpu_bo);
        radeon_bo_unpin(rdev->uvd.vcpu_bo);

        r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->uvd.gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
                radeon_bo_unref(&rdev->uvd.vcpu_bo);
                dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
                return r;
        }

        r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
                dev_err(rdev->dev, "(%d) UVD map failed\n", r);
                return r;
        }

        radeon_bo_unreserve(rdev->uvd.vcpu_bo);

        return 0;
}

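/*
 * The segment checks in radeon_uvd_cs_reloc() below suggest that UVD
 * works in 256MB segments, so the helper here restricts a bo to the
 * first segment of VRAM. With 4KB pages that means
 * lpfn = (256 * 1024 * 1024) >> 12 = 65536 pages; the fpfn expression
 * "0 >> PAGE_SHIFT" is written that way purely for symmetry and is
 * simply 0.
 */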
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
{
        rbo->placement.fpfn = 0 >> PAGE_SHIFT;
        rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}

void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
        int i, r;

        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                if (rdev->uvd.filp[i] == filp) {
                        uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
                        struct radeon_fence *fence;

                        r = radeon_uvd_get_destroy_msg(rdev,
                                R600_RING_TYPE_UVD_INDEX, handle, &fence);
                        if (r) {
                                DRM_ERROR("Error destroying UVD (%d)!\n", r);
                                continue;
                        }

                        radeon_fence_wait(fence, false);
                        radeon_fence_unref(&fence);

                        rdev->uvd.filp[i] = NULL;
                        atomic_set(&rdev->uvd.handles[i], 0);
                }
        }
}

static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
        unsigned stream_type = msg[4];
        unsigned width = msg[6];
        unsigned height = msg[7];
        unsigned dpb_size = msg[9];
        unsigned pitch = msg[28];

        unsigned width_in_mb = width / 16;
        unsigned height_in_mb = ALIGN(height / 16, 2);

        unsigned image_size, tmp, min_dpb_size;

        image_size = width * height;
        image_size += image_size / 2;
        image_size = ALIGN(image_size, 1024);

        switch (stream_type) {
        case 0: /* H264 */

                /* reference picture buffer */
                min_dpb_size = image_size * 17;

                /* macroblock context buffer */
                min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

                /* IT surface buffer */
                min_dpb_size += width_in_mb * height_in_mb * 32;
                break;

        case 1: /* VC1 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;

                /* CONTEXT_BUFFER */
                min_dpb_size += width_in_mb * height_in_mb * 128;

                /* IT surface buffer */
                min_dpb_size += width_in_mb * 64;

                /* DB surface buffer */
                min_dpb_size += width_in_mb * 128;

                /* BP */
                tmp = max(width_in_mb, height_in_mb);
                min_dpb_size += ALIGN(tmp * 7 * 16, 64);
                break;

        case 3: /* MPEG2 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;
                break;

        case 4: /* MPEG4 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;

                /* CM */
                min_dpb_size += width_in_mb * height_in_mb * 64;

                /* IT surface buffer */
                min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
                break;

        default:
                DRM_ERROR("UVD codec not handled %d!\n", stream_type);
                return -EINVAL;
        }

        if (width > pitch) {
                DRM_ERROR("Invalid UVD decoding target pitch!\n");
                return -EINVAL;
        }

        if (dpb_size < min_dpb_size) {
                DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
                          dpb_size, min_dpb_size);
                return -EINVAL;
        }

        buf_sizes[0x1] = dpb_size;
        buf_sizes[0x2] = image_size;
        return 0;
}
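
/*
 * Worked example for the H264 path above, assuming a 1920x1088 stream
 * (the numbers follow directly from the arithmetic in the function):
 *
 *   width_in_mb  = 1920 / 16           = 120
 *   height_in_mb = ALIGN(1088 / 16, 2) = 68
 *   image_size   = ALIGN(1920 * 1088 * 3 / 2, 1024) = 3133440 bytes
 *
 *   min_dpb_size = 3133440 * 17         (reference pictures)
 *                + 120 * 68 * 17 * 192  (macroblock context)
 *                + 120 * 68 * 32        (IT surface)
 *                = 53268480 + 26634240 + 261120 = 80163840 bytes (~76.5MB)
 *
 * So for such a stream userspace must have announced a DPB of at least
 * that size in the decode message.
 */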

static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                             unsigned offset, unsigned buf_sizes[])
{
        int32_t *msg, msg_type, handle;
        void *ptr;

        int i, r;

        if (offset & 0x3F) {
                DRM_ERROR("UVD messages must be 64 byte aligned!\n");
                return -EINVAL;
        }

        r = radeon_bo_kmap(bo, &ptr);
        if (r)
                return r;

        msg = ptr + offset;

        msg_type = msg[1];
        handle = msg[2];

        if (handle == 0) {
                DRM_ERROR("Invalid UVD handle!\n");
                radeon_bo_kunmap(bo);
                return -EINVAL;
        }

        if (msg_type == 1) {
                /* it's a decode msg, calc buffer sizes */
                r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
                radeon_bo_kunmap(bo);
                if (r)
                        return r;

        } else if (msg_type == 2) {
                /* it's a destroy msg, free the handle */
                for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
                radeon_bo_kunmap(bo);
                return 0;
        } else {
                /* it's a create msg, no special handling needed */
                radeon_bo_kunmap(bo);
        }

        /* create or decode, validate the handle */
        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
                        return 0;
        }

        /* handle not found, try to alloc a new one */
        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
                        p->rdev->uvd.filp[i] = p->filp;
                        return 0;
                }
        }

        DRM_ERROR("No more free UVD handles!\n");
        return -EINVAL;
}

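/*
 * How a UVD command arrives in the IB (reconstructed from
 * radeon_uvd_cs_reg() and radeon_uvd_send_msg() in this file): userspace
 * writes the buffer address as a register pair, UVD_GPCOM_VCPU_DATA0
 * (low 32 bits) and UVD_GPCOM_VCPU_DATA1 (high 32 bits), then writes
 * UVD_GPCOM_VCPU_CMD, whose value shifted right by one gives the command
 * code: 0x0 = message buffer, 0x1 = DPB, 0x2 = decoding target,
 * 0x3 = feedback buffer, while 0x100 is accepted without a size check.
 * The helper below patches the relocated address back into the IB and
 * validates the buffer against the minimum sizes in buf_sizes[].
 */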
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                               int data0, int data1,
                               unsigned buf_sizes[])
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_reloc *reloc;
        unsigned idx, cmd, offset;
        uint64_t start, end;
        int r;

        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        offset = radeon_get_ib_value(p, data0);
        idx = radeon_get_ib_value(p, data1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d!\n",
                          idx, relocs_chunk->length_dw);
                return -EINVAL;
        }

        reloc = p->relocs_ptr[(idx / 4)];
        start = reloc->lobj.gpu_offset;
        end = start + radeon_bo_size(reloc->robj);
        start += offset;

        p->ib.ptr[data0] = start & 0xFFFFFFFF;
        p->ib.ptr[data1] = start >> 32;

        cmd = radeon_get_ib_value(p, p->idx) >> 1;

        if (cmd < 0x4) {
                if ((end - start) < buf_sizes[cmd]) {
                        DRM_ERROR("buffer too small (%d / %d)!\n",
                                  (unsigned)(end - start), buf_sizes[cmd]);
                        return -EINVAL;
                }

        } else if (cmd != 0x100) {
                DRM_ERROR("invalid UVD command %X!\n", cmd);
                return -EINVAL;
        }

        if ((start >> 28) != (end >> 28)) {
                DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
                          start, end);
                return -EINVAL;
        }

        /* TODO: is this still necessary on NI+ ? */
        if ((cmd == 0 || cmd == 0x3) &&
            (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
                DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
                          start, end);
                return -EINVAL;
        }

        if (cmd == 0) {
                r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
                if (r)
                        return r;
        }

        return 0;
}

static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
                             struct radeon_cs_packet *pkt,
                             int *data0, int *data1,
                             unsigned buf_sizes[])
{
        int i, r;

        p->idx++;
        for (i = 0; i <= pkt->count; ++i) {
                switch (pkt->reg + i*4) {
                case UVD_GPCOM_VCPU_DATA0:
                        *data0 = p->idx;
                        break;
                case UVD_GPCOM_VCPU_DATA1:
                        *data1 = p->idx;
                        break;
                case UVD_GPCOM_VCPU_CMD:
                        r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
                        if (r)
                                return r;
                        break;
                case UVD_ENGINE_CNTL:
                        break;
                default:
                        DRM_ERROR("Invalid reg 0x%X!\n",
                                  pkt->reg + i*4);
                        return -EINVAL;
                }
                p->idx++;
        }
        return 0;
}

int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        int r, data0 = 0, data1 = 0;

        /* minimum buffer sizes */
        unsigned buf_sizes[] = {
                [0x00000000] = 2048,
                [0x00000001] = 32 * 1024 * 1024,
                [0x00000002] = 2048 * 1152 * 3,
                [0x00000003] = 2048,
        };
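        /*
         * The designated initializers above are indexed by the UVD
         * command code handled in radeon_uvd_cs_reloc(): 0x0 = message
         * buffer, 0x1 = DPB, 0x2 = decoding target, 0x3 = feedback
         * buffer. Entries 0x1 and 0x2 are only fallback minimums; they
         * are overwritten with the exact sizes from the decode message
         * by radeon_uvd_cs_msg_decode().
         */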

        if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
                DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
                          p->chunks[p->chunk_ib_idx].length_dw);
                return -EINVAL;
        }

        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk!\n");
                return -EINVAL;
        }

        do {
                r = radeon_cs_packet_parse(p, &pkt, p->idx);
                if (r)
                        return r;
                switch (pkt.type) {
                case RADEON_PACKET_TYPE0:
                        r = radeon_uvd_cs_reg(p, &pkt, &data0,
                                              &data1, buf_sizes);
                        if (r)
                                return r;
                        break;
                case RADEON_PACKET_TYPE2:
                        p->idx += pkt.count + 2;
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d!\n", pkt.type);
                        return -EINVAL;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
        return 0;
}

static int radeon_uvd_send_msg(struct radeon_device *rdev,
                               int ring, struct radeon_bo *bo,
                               struct radeon_fence **fence)
{
        struct ttm_validate_buffer tv;
        struct list_head head;
        struct radeon_ib ib;
        uint64_t addr;
        int i, r;

        memset(&tv, 0, sizeof(tv));
        tv.bo = &bo->tbo;

        INIT_LIST_HEAD(&head);
        list_add(&tv.head, &head);

        r = ttm_eu_reserve_buffers(&head);
        if (r)
                return r;

        radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
        radeon_uvd_force_into_uvd_segment(bo);

        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r) {
                ttm_eu_backoff_reservation(&head);
                return r;
        }

        r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
        if (r) {
                ttm_eu_backoff_reservation(&head);
                return r;
        }

        addr = radeon_bo_gpu_offset(bo);
        ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
        ib.ptr[1] = addr;
        ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
        ib.ptr[3] = addr >> 32;
        ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
        ib.ptr[5] = 0;
        for (i = 6; i < 16; ++i)
                ib.ptr[i] = PACKET2(0);
        ib.length_dw = 16;

        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r) {
                ttm_eu_backoff_reservation(&head);
                return r;
        }
        ttm_eu_fence_buffer_objects(&head, ib.fence);

        if (fence)
                *fence = radeon_fence_ref(ib.fence);

        radeon_ib_free(rdev, &ib);
        radeon_bo_unref(&bo);
        return 0;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
                              uint32_t handle, struct radeon_fence **fence)
{
        struct radeon_bo *bo;
        uint32_t *msg;
        int r, i;

        r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
        if (r)
                return r;

        r = radeon_bo_reserve(bo, false);
        if (r) {
                radeon_bo_unref(&bo);
                return r;
        }

        r = radeon_bo_kmap(bo, (void **)&msg);
        if (r) {
                radeon_bo_unreserve(bo);
                radeon_bo_unref(&bo);
                return r;
        }

        /* stitch together a UVD create msg */
        msg[0] = 0x00000de4;
        msg[1] = 0x00000000;
        msg[2] = handle;
        msg[3] = 0x00000000;
        msg[4] = 0x00000000;
        msg[5] = 0x00000000;
        msg[6] = 0x00000000;
        msg[7] = 0x00000780;
        msg[8] = 0x00000440;
        msg[9] = 0x00000000;
        msg[10] = 0x01b37000;
        for (i = 11; i < 1024; ++i)
                msg[i] = 0x0;

        radeon_bo_kunmap(bo);
        radeon_bo_unreserve(bo);

        return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
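
/*
 * A few of the magic values in the create message above can be read off
 * against radeon_uvd_cs_msg(): msg[1] is the message type (0 = create,
 * 2 = destroy below) and msg[2] the session handle. 0x780 and 0x440 are
 * 1920 and 1088 in decimal, so they presumably describe the maximum
 * decode dimensions; the remaining constants are undocumented firmware
 * parameters and are left as-is.
 */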

int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
                               uint32_t handle, struct radeon_fence **fence)
{
        struct radeon_bo *bo;
        uint32_t *msg;
        int r, i;

        r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
        if (r)
                return r;

        r = radeon_bo_reserve(bo, false);
        if (r) {
                radeon_bo_unref(&bo);
                return r;
        }

        r = radeon_bo_kmap(bo, (void **)&msg);
        if (r) {
                radeon_bo_unreserve(bo);
                radeon_bo_unref(&bo);
                return r;
        }

        /* stitch together a UVD destroy msg */
        msg[0] = 0x00000de4;
        msg[1] = 0x00000002;
        msg[2] = handle;
        msg[3] = 0x00000000;
        for (i = 4; i < 1024; ++i)
                msg[i] = 0x0;

        radeon_bo_kunmap(bo);
        radeon_bo_unreserve(bo);

        return radeon_uvd_send_msg(rdev, ring, bo, fence);
}

static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
        struct radeon_device *rdev =
                container_of(work, struct radeon_device, uvd.idle_work.work);

        if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
                radeon_set_uvd_clocks(rdev, 0, 0);
        else
                schedule_delayed_work(&rdev->uvd.idle_work,
                                      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
}

void radeon_uvd_note_usage(struct radeon_device *rdev)
{
        bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
        set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
                                            msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
        if (set_clocks)
                radeon_set_uvd_clocks(rdev, 53300, 40000);
}
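
/*
 * Power management sketch: every submission calls radeon_uvd_note_usage(),
 * which raises the UVD clocks (the 53300/40000 arguments are VCLK/DCLK
 * requests, apparently in 10kHz units, i.e. 533MHz/400MHz) and re-arms
 * the idle work. If no new fence is emitted for UVD_IDLE_TIMEOUT_MS, the
 * idle handler above drops both clocks to 0. The
 * cancel_delayed_work_sync() trick means clocks are only raised on the
 * first use after an idle period, not on every single submission.
 */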

static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
                                              unsigned target_freq,
                                              unsigned pd_min,
                                              unsigned pd_even)
{
        unsigned post_div = vco_freq / target_freq;

        /* adjust to post divider minimum value */
        if (post_div < pd_min)
                post_div = pd_min;

        /* we always need a frequency less than or equal to the target */
        if ((vco_freq / post_div) > target_freq)
                post_div += 1;

        /* post dividers above a certain value must be even */
        if (post_div > pd_even && post_div % 2)
                post_div += 1;

        return post_div;
}
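
/*
 * Worked example (made-up numbers, just tracing the arithmetic above):
 * vco_freq = 100000, target_freq = 16000, pd_min = 2, pd_even = 4.
 * Integer division gives post_div = 6, which is above pd_min; but
 * 100000 / 6 = 16666 > 16000, so post_div becomes 7; 7 > pd_even and is
 * odd, so it is rounded up to 8, yielding 100000 / 8 = 12500 <= 16000.
 */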

/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
                                  unsigned vclk, unsigned dclk,
                                  unsigned vco_min, unsigned vco_max,
                                  unsigned fb_factor, unsigned fb_mask,
                                  unsigned pd_min, unsigned pd_max,
                                  unsigned pd_even,
                                  unsigned *optimal_fb_div,
                                  unsigned *optimal_vclk_div,
                                  unsigned *optimal_dclk_div)
{
        unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

        /* start off with something large */
        unsigned optimal_score = ~0;

        /* loop through vco from low to high */
        vco_min = max(max(vco_min, vclk), dclk);
        for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

                uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
                unsigned vclk_div, dclk_div, score;

                do_div(fb_div, ref_freq);

                /* fb div out of range? */
                if (fb_div > fb_mask)
                        break; /* it can only get worse */

                fb_div &= fb_mask;

                /* calc vclk divider with current vco freq */
                vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
                                                         pd_min, pd_even);
                if (vclk_div > pd_max)
                        break; /* vco is too big, it has to stop */

                /* calc dclk divider with current vco freq */
                dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
                                                         pd_min, pd_even);
                if (dclk_div > pd_max)
                        break; /* vco is too big, it has to stop */

                /* calc score with current vco freq */
                score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

                /* determine if this vco setting is better than current optimal settings */
                if (score < optimal_score) {
                        *optimal_fb_div = fb_div;
                        *optimal_vclk_div = vclk_div;
                        *optimal_dclk_div = dclk_div;
                        optimal_score = score;
                        if (optimal_score == 0)
                                break; /* it can't get better than this */
                }
        }

        /* did we find a valid setup? */
        if (optimal_score == ~0)
                return -EINVAL;

        return 0;
}
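
/*
 * The score above is the sum of the two deviations from the wanted
 * clocks; radeon_uvd_calc_upll_post_div() guarantees vco_freq / div is
 * never above the target, so both terms are non-negative and a score of
 * 0 means both clocks are hit exactly. The resulting dividers satisfy
 * roughly vclk ~= vco_freq / vclk_div with
 * vco_freq = ref_freq * fb_div / fb_factor (this relation is inferred
 * from the fb_div computation above, not from register documentation).
 */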

int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
                                unsigned cg_upll_func_cntl)
{
        unsigned i;

        /* make sure UPLL_CTLREQ is deasserted */
        WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

        mdelay(10);

        /* assert UPLL_CTLREQ */
        WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

        /* wait for CTLACK and CTLACK2 to get asserted */
        for (i = 0; i < 100; ++i) {
                uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
                if ((RREG32(cg_upll_func_cntl) & mask) == mask)
                        break;
                mdelay(10);
        }

        /* deassert UPLL_CTLREQ */
        WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

        if (i == 100) {
                DRM_ERROR("Timeout setting UVD clocks!\n");
                return -ETIMEDOUT;
        }

        return 0;
}
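
/*
 * Usage sketch (assumed caller flow, based on how the two helpers fit
 * together rather than on any specific ASIC file): a set-clocks
 * implementation first calls radeon_uvd_calc_upll_dividers() to find
 * fb_div/vclk_div/dclk_div for the wanted VCLK/DCLK, programs those
 * dividers into its CG_UPLL_* registers, and finally calls
 * radeon_uvd_send_upll_ctlreq() to make the PLL latch the new settings
 * via the CTLREQ/CTLACK handshake, giving up after 100 * 10ms.
 */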