/* drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c */

/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *	Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

/* These are common relative offsets for all asics, from uvd_7_0_offset.h */
#define UVD_GPCOM_VCPU_CMD	0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP		0x03ff
#define UVD_BASE_SI		0x3800

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

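/**
 * amdgpu_uvd_sw_init - load firmware and allocate UVD resources
 *
 * @adev: amdgpu_device pointer
 *
 * Request and validate the UVD microcode for the detected ASIC, derive
 * the supported firmware version and handle count from it, allocate the
 * VCPU BO and scheduler entity for each UVD instance and clear the
 * handle tables.
 */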
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;

	if (adev->asic_type < CHIP_VEGA20) {
		unsigned version_major, version_minor;

		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
			 version_major, version_minor, family_id);

		/*
		 * Limit the number of UVD handles depending on microcode major
		 * and minor versions. The first firmware version with support
		 * for 40 UVD instances is 1.80, so that version and all later
		 * ones get the larger handle count.
		 */
		if ((version_major > 0x01) ||
		    ((version_major == 0x01) && (version_minor >= 0x50)))
			adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

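		/*
		 * hdr->ucode_version packs major in bits 31:24, minor in
		 * bits 15:8 and the family ID in bits 7:0; fw_version
		 * repacks those as major<<24 | minor<<16 | family<<8 so it
		 * can be compared directly against the FW_x_y_z constants
		 * above (e.g. FW_1_66_16 for Polaris10/11).
		 */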
		adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
					(family_id << 8));

		if ((adev->asic_type == CHIP_POLARIS10 ||
		     adev->asic_type == CHIP_POLARIS11) &&
		    (adev->uvd.fw_version < FW_1_66_16))
			DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
				  version_major, version_minor);
	} else {
		unsigned int enc_major, enc_minor, dec_minor;

		dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
		enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
		DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
			 enc_major, enc_minor, dec_minor, family_id);

		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
	}

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}

		ring = &adev->uvd.inst[j].ring;
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
					  rq, NULL);
		if (r != 0) {
			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
			return r;
		}

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			atomic_set(&adev->uvd.inst[j].handles[i], 0);
			adev->uvd.inst[j].filp[i] = NULL;
		}
	}
	/* from UVD v5.0, HW addressing capacity increased to 64 bits */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

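/**
 * amdgpu_uvd_sw_fini - free UVD resources
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the scheduler entity, VCPU BO and rings of every UVD
 * instance and release the firmware image.
 */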
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		kfree(adev->uvd.inst[j].saved_bo);

		drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	release_firmware(adev->uvd.fw);

	return 0;
}

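/**
 * amdgpu_uvd_suspend - save UVD state before suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and save the VCPU BO contents of each instance
 * to system memory; on chips using physical mode the copy is skipped
 * when no handles are in use.
 */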
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, j;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		/* only valid for physical mode */
		if (adev->asic_type < CHIP_POLARIS10) {
			for (i = 0; i < adev->uvd.max_handles; ++i)
				if (atomic_read(&adev->uvd.inst[j].handles[i]))
					break;

			if (i == adev->uvd.max_handles)
				continue;
		}

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
	}
	return 0;
}

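/**
 * amdgpu_uvd_resume - restore UVD state after resume
 *
 * @adev: amdgpu_device pointer
 *
 * Restore the saved VCPU BO contents of each instance, or reload the
 * microcode and clear the rest of the BO when nothing was saved, then
 * force fence completion to resynchronize the fence sequence numbers.
 */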
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
			kfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* to restore uvd fence seq */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

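/**
 * amdgpu_uvd_free_handles - free all handles owned by a file
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file the handles belong to
 *
 * Called on file close: send a destroy message for every session
 * handle still owned by @filp, wait for it to complete and release
 * the handle slot.
 */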
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		ring = &adev->uvd.inst[j].ring;

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
				struct dma_fence *fence;

				r = amdgpu_uvd_get_destroy_msg(ring, handle,
							       false, &fence);
				if (r) {
					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
					continue;
				}

				dma_fence_wait(fence, false);
				dma_fence_put(fence);

				adev->uvd.inst[j].filp[i] = NULL;
				atomic_set(&adev->uvd.inst[j].handles[i], 0);
			}
		}
	}
}

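/*
 * UVD on these chips can only access buffers in the first 256MB of its
 * segment, so clamp every placement of the BO to that window.
 */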
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;
	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

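/*
 * Read the 64-bit buffer address for the current command from the
 * DATA0 (low 32 bits) and DATA1 (high 32 bits) register writes that
 * were recorded in the parser context.
 */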
static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
				    unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;
	uint32_t ip_instance = ctx->parser->job->ring->me;

	if (offset & 0x3F) {
		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
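	/*
	 * Minimum sizes indexed by command type: 0x0 message, 0x1 DPB,
	 * 0x2 decoding target, 0x3 feedback buffer, 0x4 session context;
	 * the decode message parser fills in the real minimums for
	 * entries 1, 2 and 4.
	 */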
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 0xFFFFFFFF,
		[0x00000002] = 0xFFFFFFFF,
		[0x00000003] = 2048,
		[0x00000004] = 0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	parser->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

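/**
 * amdgpu_uvd_send_msg - submit a message buffer to the ring
 *
 * @ring: UVD ring to submit on
 * @bo: reserved buffer object containing the message
 * @direct: submit directly to the ring or go through the scheduler
 * @fence: optional resulting fence
 *
 * Validate the message BO into the UVD segment if needed, build an IB
 * that points the VCPU at it and submit the IB either directly or as a
 * scheduler job. Consumes the BO reference in all cases.
 */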
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint32_t data[4];
	uint64_t addr;
	long r;
	int i;
	unsigned offset_idx = 0;
	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);

	if (!ring->adev->uvd.address_64_bit) {
		struct ttm_operation_ctx ctx = { true, false };

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			goto err;
	}

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	if (adev->asic_type >= CHIP_VEGA10) {
		offset_idx = 1 + ring->me;
		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
	}

	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							msecs_to_jiffies(10));
		if (r == 0)
			r = -ETIMEDOUT;
		if (r < 0)
			goto err_free;

		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r)
			goto err_free;

		r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

/* multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

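/**
 * amdgpu_uvd_get_destroy_msg - send a session destroy message
 *
 * @ring: UVD ring to submit on
 * @handle: session handle to destroy
 * @direct: submit directly or through the scheduler
 * @fence: optional resulting fence
 */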
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

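/**
 * amdgpu_uvd_idle_work_handler - power off UVD when idle
 *
 * Runs UVD_IDLE_TIMEOUT after the last use; if no fences are pending
 * on any UVD ring, gate the block (via DPM or directly), otherwise
 * re-arm the delayed work.
 */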
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
		}
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	}
}

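/**
 * amdgpu_uvd_ring_begin_use - power up UVD before ring use
 *
 * @ring: UVD ring about to be used
 *
 * Cancel the idle work; when none was pending the block may already be
 * powered down, so bring clocks and power back up.
 */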
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_UNGATE);
		}
	}
}

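/**
 * amdgpu_uvd_ring_end_use - schedule power down after ring use
 *
 * @ring: UVD ring that was used
 *
 * Re-arm the idle work so the block is gated again after
 * UVD_IDLE_TIMEOUT without activity.
 */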
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;
	uint32_t ip_instance = ring->me;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
	} else {
		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.inst->handles[i]))
			used_handles++;
	}

	return used_handles;
}