drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))
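/*
 * These version values pack (major << 24) | (minor << 16) | (x << 8),
 * the same layout amdgpu_uvd_sw_init() assembles into
 * adev->uvd.fw_version, so firmware checks reduce to plain integer
 * comparisons.
 */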

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI	"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

/* These are common relative offsets for all asics, from uvd_7_0_offset.h */
#define UVD_GPCOM_VCPU_CMD	0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP		0x03ff
#define UVD_BASE_SI		0x3800

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		version_major, version_minor, family_id);
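	/*
	 * Example decode of the packed ucode_version word above:
	 * 0x01000a28 -> version 1.10, family ID 0x28.
	 */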

	/*
	 * Limit the number of UVD handles depending on microcode major
	 * and minor versions. The firmware version which has 40 UVD
	 * instances support is 1.80. So all subsequent versions should
	 * also have the same support.
	 */
	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
				(family_id << 8));

	if ((adev->asic_type == CHIP_POLARIS10 ||
	     adev->asic_type == CHIP_POLARIS11) &&
	    (adev->uvd.fw_version < FW_1_66_16))
		DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
			  version_major, version_minor);

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}

		ring = &adev->uvd.inst[j].ring;
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
					  rq, NULL);
		if (r != 0) {
			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
			return r;
		}

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			atomic_set(&adev->uvd.inst[j].handles[i], 0);
			adev->uvd.inst[j].filp[i] = NULL;
		}
	}
	/* from UVD v5.0, HW addressing capacity increased to 64 bits */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		kfree(adev->uvd.inst[j].saved_bo);

		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	release_firmware(adev->uvd.fw);

	return 0;
}

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);

		/* only valid for physical mode */
		if (adev->asic_type < CHIP_POLARIS10) {
			for (i = 0; i < adev->uvd.max_handles; ++i)
				if (atomic_read(&adev->uvd.inst[j].handles[i]))
					break;

			if (i == adev->uvd.max_handles)
				continue;
		}

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
	}
	return 0;
}

int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
			kfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* to restore uvd fence seq */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		ring = &adev->uvd.inst[j].ring;

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
				struct dma_fence *fence;

				r = amdgpu_uvd_get_destroy_msg(ring, handle,
							       false, &fence);
				if (r) {
					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
					continue;
				}

				dma_fence_wait(fence, false);
				dma_fence_put(fence);

				adev->uvd.inst[j].filp[i] = NULL;
				atomic_set(&adev->uvd.inst[j].handles[i], 0);
			}
		}
	}
}

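/*
 * Clamp all allowed placements to the first 256MB of VRAM, the window
 * the UVD block can address when 64-bit addressing is not available.
 */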
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;
	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
	unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
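		/*
		 * The per-level constants below appear to be the MaxDpbMbs
		 * limits from H.264 Annex A (table A-1); dividing by the
		 * frame size in macroblocks yields the DPB frame count.
		 */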
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;
	uint32_t ip_instance = ctx->parser->job->ring->me;

	if (offset & 0x3F) {
		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

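	/* write the relocated address back into the command stream */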
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
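	/*
	 * Minimum sizes per UVD buffer type; 0 = message and 3 = feedback
	 * are fixed, while the DPB (1), decoding target (2) and context (4)
	 * limits are filled in by amdgpu_uvd_cs_msg_decode().
	 */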
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	parser->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint32_t data[4];
	uint64_t addr;
	long r;
	int i;
	unsigned offset_idx = 0;
	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);

	if (!ring->adev->uvd.address_64_bit) {
		struct ttm_operation_ctx ctx = { true, false };

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			goto err;
	}

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	if (adev->asic_type >= CHIP_VEGA10) {
		offset_idx = 1 + ring->me;
		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
	}

	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

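	/*
	 * Direct submission executes the IB on the ring right away (e.g.
	 * for init time and IB tests); otherwise the job is handed to the
	 * scheduler entity of this UVD instance.
	 */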
	if (direct) {
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							msecs_to_jiffies(10));
		if (r == 0)
			r = -ETIMEDOUT;
		if (r < 0)
			goto err_free;

		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r)
			goto err_free;

		r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together an UVD create msg */
1102 /* stitch together an UVD create msg */
1103 msg[0] = cpu_to_le32(0x00000de4);
1104 msg[1] = cpu_to_le32(0x00000000);
1105 msg[2] = cpu_to_le32(handle);
1106 msg[3] = cpu_to_le32(0x00000000);
1107 msg[4] = cpu_to_le32(0x00000000);
1108 msg[5] = cpu_to_le32(0x00000000);
1109 msg[6] = cpu_to_le32(0x00000000);
1110 msg[7] = cpu_to_le32(0x00000780);
1111 msg[8] = cpu_to_le32(0x00000440);
1112 msg[9] = cpu_to_le32(0x00000000);
1113 msg[10] = cpu_to_le32(0x01b37000);
1114 for (i = 11; i < 1024; ++i)
1115 msg[i] = cpu_to_le32(0x0);
1116
d7af97db 1117 return amdgpu_uvd_send_msg(ring, bo, true, fence);
d38ceaf9
AD
1118}
1119
1120int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
f54d1867 1121 bool direct, struct dma_fence **fence)
d38ceaf9
AD
1122{
1123 struct amdgpu_device *adev = ring->adev;
4ab91cfb 1124 struct amdgpu_bo *bo = NULL;
d38ceaf9
AD
1125 uint32_t *msg;
1126 int r, i;
1127
4ab91cfb
CK
1128 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
1129 AMDGPU_GEM_DOMAIN_VRAM,
1130 &bo, NULL, (void **)&msg);
d38ceaf9
AD
1131 if (r)
1132 return r;
1133
d38ceaf9
AD
1134 /* stitch together an UVD destroy msg */
1135 msg[0] = cpu_to_le32(0x00000de4);
1136 msg[1] = cpu_to_le32(0x00000002);
1137 msg[2] = cpu_to_le32(handle);
1138 msg[3] = cpu_to_le32(0x00000000);
1139 for (i = 4; i < 1024; ++i)
1140 msg[i] = cpu_to_le32(0x0);
1141
d7af97db 1142 return amdgpu_uvd_send_msg(ring, bo, direct, fence);
d38ceaf9
AD
1143}
1144
1145static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
1146{
1147 struct amdgpu_device *adev =
2bb795f5 1148 container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
4bd2c5dd 1149 unsigned fences = 0, i, j;
6f0fd919
AD
1150
1151 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1152 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
4bd2c5dd
AD
1153 for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
1154 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
1155 }
6f0fd919 1156 }
d38ceaf9 1157
713c0021 1158 if (fences == 0) {
d38ceaf9
AD
1159 if (adev->pm.dpm_enabled) {
1160 amdgpu_dpm_enable_uvd(adev, false);
1161 } else {
1162 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
e38ca2b3 1163 /* shutdown the UVD block */
2990a1fc
AD
1164 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1165 AMD_PG_STATE_GATE);
1166 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1167 AMD_CG_STATE_GATE);
d38ceaf9
AD
1168 }
1169 } else {
2bb795f5 1170 schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
d38ceaf9
AD
1171 }
1172}
1173
c4120d55 1174void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
d38ceaf9 1175{
c4120d55 1176 struct amdgpu_device *adev = ring->adev;
14a8032a 1177 bool set_clocks;
d38ceaf9 1178
d9af2259
XY
1179 if (amdgpu_sriov_vf(adev))
1180 return;
1181
2bb795f5 1182 set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
d38ceaf9
AD
1183 if (set_clocks) {
1184 if (adev->pm.dpm_enabled) {
1185 amdgpu_dpm_enable_uvd(adev, true);
1186 } else {
1187 amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
2990a1fc
AD
1188 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1189 AMD_CG_STATE_UNGATE);
1190 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1191 AMD_PG_STATE_UNGATE);
d38ceaf9
AD
1192 }
1193 }
1194}
c4120d55
CK
1195
1196void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
1197{
14a8032a 1198 if (!amdgpu_sriov_vf(ring->adev))
2bb795f5 1199 schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
c4120d55 1200}
8de190c9
CK
1201
1202/**
1203 * amdgpu_uvd_ring_test_ib - test ib execution
1204 *
1205 * @ring: amdgpu_ring pointer
1206 *
1207 * Test if we can successfully execute an IB
1208 */
bbec97aa 1209int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
8de190c9 1210{
f54d1867 1211 struct dma_fence *fence;
bbec97aa 1212 long r;
10dd74ea 1213 uint32_t ip_instance = ring->me;
8de190c9
CK
1214
1215 r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
1216 if (r) {
10dd74ea 1217 DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
8de190c9
CK
1218 goto error;
1219 }
1220
1221 r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
1222 if (r) {
10dd74ea 1223 DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
8de190c9
CK
1224 goto error;
1225 }
1226
f54d1867 1227 r = dma_fence_wait_timeout(fence, false, timeout);
bbec97aa 1228 if (r == 0) {
10dd74ea 1229 DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
bbec97aa
CK
1230 r = -ETIMEDOUT;
1231 } else if (r < 0) {
10dd74ea 1232 DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
bbec97aa 1233 } else {
10dd74ea 1234 DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
bbec97aa 1235 r = 0;
8de190c9 1236 }
bbec97aa 1237
f54d1867 1238 dma_fence_put(fence);
c2a4c5b7
JC
1239
1240error:
8de190c9
CK
1241 return r;
1242}
44879b62
AN
1243
1244/**
1245 * amdgpu_uvd_used_handles - returns used UVD handles
1246 *
1247 * @adev: amdgpu_device pointer
1248 *
1249 * Returns the number of UVD handles in use
1250 */
1251uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
1252{
1253 unsigned i;
1254 uint32_t used_handles = 0;
1255
1256 for (i = 0; i < adev->uvd.max_handles; ++i) {
1257 /*
1258 * Handles can be freed in any order, and not
1259 * necessarily linear. So we need to count
1260 * all non-zero handles.
1261 */
2bb795f5 1262 if (atomic_read(&adev->uvd.inst->handles[i]))
44879b62
AN
1263 used_handles++;
1264 }
1265
1266 return used_handles;
1267}