Revert "drm/amdgpu: Add an ATPX quirk for hybrid laptop"
[linux-2.6-block.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_vcn.c
CommitLineData
95d0906f
LL
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
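
/**
 * amdgpu_vcn_sw_init - initialize the VCN block
 * @adev: amdgpu device pointer
 *
 * Request and validate the VCN firmware, then allocate the VCPU buffer
 * object that will back it.
 *
 * Returns 0 on success or a negative error code on failure.
 */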
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
		 version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	return 0;
}
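
/**
 * amdgpu_vcn_sw_fini - tear down the VCN block
 * @adev: amdgpu device pointer
 *
 * Free the suspend backup, the VCPU buffer object and the decode and
 * encode rings, then release the firmware.
 */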
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kfree(adev->vcn.saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	release_firmware(adev->vcn.fw);

	return 0;
}
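
/**
 * amdgpu_vcn_suspend - save VCPU state for suspend
 * @adev: amdgpu device pointer
 *
 * Cancel the idle work and copy the contents of the VCPU buffer object
 * to a CPU-side backup so it can be restored on resume.
 */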
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}
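
/**
 * amdgpu_vcn_resume - restore VCPU state after resume
 * @adev: amdgpu device pointer
 *
 * Copy the suspend backup into the VCPU buffer object if one exists;
 * otherwise reload the firmware image and clear the rest of the buffer.
 */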
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}
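
/*
 * Idle work handler: once no decode or encode fences remain outstanding,
 * power the VCN block down (via DPM when enabled, otherwise by gating the
 * IP block); if fences remain, re-arm the idle timer.
 */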
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
	unsigned i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
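
/**
 * amdgpu_vcn_ring_begin_use - power up VCN before ring use
 * @ring: the VCN ring about to be used
 *
 * Cancel the pending idle work; if none was queued the block may have
 * been powered down by the idle handler, so power it back up.
 */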
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}
}
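
/**
 * amdgpu_vcn_ring_end_use - schedule VCN power-down after ring use
 * @ring: the VCN ring that was used
 *
 * Re-arm the idle timer so the block is powered down again after
 * VCN_IDLE_TIMEOUT of inactivity.
 */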
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
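
/**
 * amdgpu_vcn_dec_ring_test_ring - basic decode ring liveness test
 * @ring: the decode ring to test
 *
 * Write a token into mmUVD_CONTEXT_ID through the ring and poll the
 * register until it reads back, failing after the usec timeout.
 */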
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
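
/*
 * Submit a decoder message buffer: build a small IB that points the VCPU
 * at the message BO and schedule it. The BO is fenced, unreserved and
 * unreferenced on both the success and error paths.
 */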
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err_free;

	amdgpu_job_free(job);

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
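
/* Fill a message BO with a session-create message and submit it. */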
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
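
/* Fill a message BO with a session-destroy message and submit it. */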
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
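
/**
 * amdgpu_vcn_dec_ring_test_ib - decode ring indirect buffer test
 * @ring: the decode ring to test
 * @timeout: how long to wait for the destroy fence, in jiffies
 *
 * Send a create message followed by a destroy message and wait for the
 * destroy submission to fence.
 */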
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}
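
/**
 * amdgpu_vcn_enc_ring_test_ring - basic encode ring liveness test
 * @ring: the encode ring to test
 *
 * Submit an END command and poll the read pointer until the engine has
 * consumed it, failing after the usec timeout.
 */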
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
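
/* Build an IB carrying an encoder session-create message and submit it. */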
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
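
/* Build an IB carrying an encoder close-session message and submit it. */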
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
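
/**
 * amdgpu_vcn_enc_ring_test_ib - encode ring indirect buffer test
 * @ring: the encode ring to test
 * @timeout: how long to wait for the close-session fence, in jiffies
 *
 * Open and immediately close an encode session, waiting for the
 * close-session submission to fence.
 */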
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}