Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2013 Advanced Micro Devices, Inc. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * Permission is hereby granted, free of charge, to any person obtaining a | |
6 | * copy of this software and associated documentation files (the | |
7 | * "Software"), to deal in the Software without restriction, including | |
8 | * without limitation the rights to use, copy, modify, merge, publish, | |
9 | * distribute, sub license, and/or sell copies of the Software, and to | |
10 | * permit persons to whom the Software is furnished to do so, subject to | |
11 | * the following conditions: | |
12 | * | |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
20 | * | |
21 | * The above copyright notice and this permission notice (including the | |
22 | * next paragraph) shall be included in all copies or substantial portions | |
23 | * of the Software. | |
24 | * | |
25 | * Authors: Christian König <christian.koenig@amd.com> | |
26 | */ | |
27 | ||
28 | #include <linux/firmware.h> | |
29 | #include <linux/module.h> | |
30 | #include <drm/drmP.h> | |
31 | #include <drm/drm.h> | |
32 | ||
33 | #include "amdgpu.h" | |
34 | #include "amdgpu_pm.h" | |
35 | #include "amdgpu_vce.h" | |
36 | #include "cikd.h" | |
37 | ||
38 | /* 1 second timeout */ | |
182830a1 | 39 | #define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000) |
d38ceaf9 AD |
40 | |
41 | /* Firmware Names */ | |
42 | #ifdef CONFIG_DRM_AMDGPU_CIK | |
43 | #define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin" | |
edf600da CK |
44 | #define FIRMWARE_KABINI "radeon/kabini_vce.bin" |
45 | #define FIRMWARE_KAVERI "radeon/kaveri_vce.bin" | |
46 | #define FIRMWARE_HAWAII "radeon/hawaii_vce.bin" | |
d38ceaf9 AD |
47 | #define FIRMWARE_MULLINS "radeon/mullins_vce.bin" |
48 | #endif | |
c65444fe JZ |
49 | #define FIRMWARE_TONGA "amdgpu/tonga_vce.bin" |
50 | #define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin" | |
188a9bcd | 51 | #define FIRMWARE_FIJI "amdgpu/fiji_vce.bin" |
cfaba566 | 52 | #define FIRMWARE_STONEY "amdgpu/stoney_vce.bin" |
2cc0c0b5 FC |
53 | #define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin" |
54 | #define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin" | |
d38ceaf9 AD |
55 | |
56 | #ifdef CONFIG_DRM_AMDGPU_CIK | |
57 | MODULE_FIRMWARE(FIRMWARE_BONAIRE); | |
58 | MODULE_FIRMWARE(FIRMWARE_KABINI); | |
59 | MODULE_FIRMWARE(FIRMWARE_KAVERI); | |
60 | MODULE_FIRMWARE(FIRMWARE_HAWAII); | |
61 | MODULE_FIRMWARE(FIRMWARE_MULLINS); | |
62 | #endif | |
63 | MODULE_FIRMWARE(FIRMWARE_TONGA); | |
64 | MODULE_FIRMWARE(FIRMWARE_CARRIZO); | |
188a9bcd | 65 | MODULE_FIRMWARE(FIRMWARE_FIJI); |
cfaba566 | 66 | MODULE_FIRMWARE(FIRMWARE_STONEY); |
2cc0c0b5 FC |
67 | MODULE_FIRMWARE(FIRMWARE_POLARIS10); |
68 | MODULE_FIRMWARE(FIRMWARE_POLARIS11); | |
d38ceaf9 AD |
69 | |
70 | static void amdgpu_vce_idle_work_handler(struct work_struct *work); | |
71 | ||
72 | /** | |
73 | * amdgpu_vce_init - allocate memory, load vce firmware | |
74 | * | |
75 | * @adev: amdgpu_device pointer | |
76 | * | |
77 | * First step to get VCE online, allocate memory and load the firmware | |
78 | */ | |
e9822622 | 79 | int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) |
d38ceaf9 | 80 | { |
c594989c CK |
81 | struct amdgpu_ring *ring; |
82 | struct amd_sched_rq *rq; | |
d38ceaf9 AD |
83 | const char *fw_name; |
84 | const struct common_firmware_header *hdr; | |
85 | unsigned ucode_version, version_major, version_minor, binary_id; | |
86 | int i, r; | |
87 | ||
d38ceaf9 AD |
88 | switch (adev->asic_type) { |
89 | #ifdef CONFIG_DRM_AMDGPU_CIK | |
90 | case CHIP_BONAIRE: | |
91 | fw_name = FIRMWARE_BONAIRE; | |
92 | break; | |
93 | case CHIP_KAVERI: | |
94 | fw_name = FIRMWARE_KAVERI; | |
95 | break; | |
96 | case CHIP_KABINI: | |
97 | fw_name = FIRMWARE_KABINI; | |
98 | break; | |
99 | case CHIP_HAWAII: | |
100 | fw_name = FIRMWARE_HAWAII; | |
101 | break; | |
102 | case CHIP_MULLINS: | |
103 | fw_name = FIRMWARE_MULLINS; | |
104 | break; | |
105 | #endif | |
106 | case CHIP_TONGA: | |
107 | fw_name = FIRMWARE_TONGA; | |
108 | break; | |
109 | case CHIP_CARRIZO: | |
110 | fw_name = FIRMWARE_CARRIZO; | |
111 | break; | |
188a9bcd AD |
112 | case CHIP_FIJI: |
113 | fw_name = FIRMWARE_FIJI; | |
114 | break; | |
cfaba566 SL |
115 | case CHIP_STONEY: |
116 | fw_name = FIRMWARE_STONEY; | |
117 | break; | |
2cc0c0b5 FC |
118 | case CHIP_POLARIS10: |
119 | fw_name = FIRMWARE_POLARIS10; | |
1b4eeea5 | 120 | break; |
2cc0c0b5 FC |
121 | case CHIP_POLARIS11: |
122 | fw_name = FIRMWARE_POLARIS11; | |
1b4eeea5 | 123 | break; |
d38ceaf9 AD |
124 | |
125 | default: | |
126 | return -EINVAL; | |
127 | } | |
128 | ||
129 | r = request_firmware(&adev->vce.fw, fw_name, adev->dev); | |
130 | if (r) { | |
131 | dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n", | |
132 | fw_name); | |
133 | return r; | |
134 | } | |
135 | ||
136 | r = amdgpu_ucode_validate(adev->vce.fw); | |
137 | if (r) { | |
138 | dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n", | |
139 | fw_name); | |
140 | release_firmware(adev->vce.fw); | |
141 | adev->vce.fw = NULL; | |
142 | return r; | |
143 | } | |
144 | ||
145 | hdr = (const struct common_firmware_header *)adev->vce.fw->data; | |
146 | ||
147 | ucode_version = le32_to_cpu(hdr->ucode_version); | |
148 | version_major = (ucode_version >> 20) & 0xfff; | |
149 | version_minor = (ucode_version >> 8) & 0xfff; | |
150 | binary_id = ucode_version & 0xff; | |
151 | DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n", | |
152 | version_major, version_minor, binary_id); | |
153 | adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) | | |
154 | (binary_id << 8)); | |
155 | ||
156 | /* allocate firmware, stack and heap BO */ | |
157 | ||
d38ceaf9 | 158 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, |
857d913d | 159 | AMDGPU_GEM_DOMAIN_VRAM, |
03f48dd5 CK |
160 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
161 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | |
72d7668b | 162 | NULL, NULL, &adev->vce.vcpu_bo); |
d38ceaf9 AD |
163 | if (r) { |
164 | dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); | |
165 | return r; | |
166 | } | |
167 | ||
168 | r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false); | |
169 | if (r) { | |
170 | amdgpu_bo_unref(&adev->vce.vcpu_bo); | |
171 | dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r); | |
172 | return r; | |
173 | } | |
174 | ||
175 | r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM, | |
176 | &adev->vce.gpu_addr); | |
177 | amdgpu_bo_unreserve(adev->vce.vcpu_bo); | |
178 | if (r) { | |
179 | amdgpu_bo_unref(&adev->vce.vcpu_bo); | |
180 | dev_err(adev->dev, "(%d) VCE bo pin failed\n", r); | |
181 | return r; | |
182 | } | |
183 | ||
c594989c CK |
184 | |
185 | ring = &adev->vce.ring[0]; | |
186 | rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; | |
187 | r = amd_sched_entity_init(&ring->sched, &adev->vce.entity, | |
188 | rq, amdgpu_sched_jobs); | |
189 | if (r != 0) { | |
190 | DRM_ERROR("Failed setting up VCE run queue.\n"); | |
191 | return r; | |
192 | } | |
193 | ||
d38ceaf9 AD |
194 | for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { |
195 | atomic_set(&adev->vce.handles[i], 0); | |
196 | adev->vce.filp[i] = NULL; | |
197 | } | |
198 | ||
ebff485e CK |
199 | INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); |
200 | mutex_init(&adev->vce.idle_mutex); | |
201 | ||
d38ceaf9 AD |
202 | return 0; |
203 | } | |
204 | ||
/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* nothing to tear down if sw_init never allocated the firmware BO */
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}
231 | ||
/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 * Returns 0 when no encode session is open; returns -EINVAL when
 * sessions are still open, since suspending those is not supported.
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	/* look for a still-open session handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	/* all handles free: safe to suspend */
	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}
256 | ||
/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 * Re-uploads the firmware image into the (already pinned) VCPU BO
 * after a resume, since VRAM contents may have been lost.
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	/* copy the firmware payload (past the ucode header) into the BO;
	 * memcpy_toio because the kmap may point at write-combined VRAM */
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}
297 | ||
/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	/* sum the fences still outstanding across all VCE rings */
	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		/* idle: power down via DPM when available, else gate clocks */
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		/* still busy: check again after the idle timeout */
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}
324 | ||
/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	mutex_lock(&adev->vce.idle_mutex);
	/* if no idle work was pending, the idle handler may already have
	 * powered VCE down, so clocks must be brought back up */
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}
348 | ||
/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	/* deferred so back-to-back submissions don't bounce the power state */
	schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
360 | ||
/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		/* only tear down sessions owned by this file */
		if (!handle || adev->vce.filp[i] != filp)
			continue;

		/* submit a destroy msg through the scheduler (direct=false) */
		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}
387 | ||
/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* scratch address inside the IB itself, used as feedback buffer */
	dummy = ib->gpu_addr + 1024;

	/* stitch together an VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	/* firmware with major version >= 52 uses a longer create message */
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy; /* low 32 bits (implicit truncation) */
	ib->ptr[ib->length_dw++] = 0x00000001;

	/* zero-pad the rest of the IB */
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
468 | ||
/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit directly on the ring, or go through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together an VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	/* zero-pad the rest of the IB */
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct) {
		/* bypass the scheduler and submit on the ring right away */
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		/* hand the job to the scheduler entity; it owns the job now */
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
538 | ||
539 | /** | |
540 | * amdgpu_vce_cs_reloc - command submission relocation | |
541 | * | |
542 | * @p: parser context | |
543 | * @lo: address of lower dword | |
544 | * @hi: address of higher dword | |
f1689ec1 | 545 | * @size: minimum size |
d38ceaf9 AD |
546 | * |
547 | * Patch relocation inside command stream with real buffer address | |
548 | */ | |
f1689ec1 | 549 | static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, |
dc78330a | 550 | int lo, int hi, unsigned size, uint32_t index) |
d38ceaf9 AD |
551 | { |
552 | struct amdgpu_bo_va_mapping *mapping; | |
d38ceaf9 AD |
553 | struct amdgpu_bo *bo; |
554 | uint64_t addr; | |
555 | ||
dc78330a CK |
556 | if (index == 0xffffffff) |
557 | index = 0; | |
558 | ||
d38ceaf9 AD |
559 | addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) | |
560 | ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32; | |
dc78330a | 561 | addr += ((uint64_t)size) * ((uint64_t)index); |
d38ceaf9 AD |
562 | |
563 | mapping = amdgpu_cs_find_mapping(p, addr, &bo); | |
564 | if (mapping == NULL) { | |
dc78330a CK |
565 | DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n", |
566 | addr, lo, hi, size, index); | |
d38ceaf9 AD |
567 | return -EINVAL; |
568 | } | |
569 | ||
f1689ec1 CK |
570 | if ((addr + (uint64_t)size) > |
571 | ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) { | |
572 | DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n", | |
573 | addr, lo, hi); | |
574 | return -EINVAL; | |
575 | } | |
576 | ||
d38ceaf9 AD |
577 | addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; |
578 | addr += amdgpu_bo_gpu_offset(bo); | |
dc78330a | 579 | addr -= ((uint64_t)size) * ((uint64_t)index); |
d38ceaf9 | 580 | |
7270f839 CK |
581 | amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr)); |
582 | amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr)); | |
d38ceaf9 AD |
583 | |
584 | return 0; | |
585 | } | |
586 | ||
/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: bitmask updated with any newly allocated handle slot
 *
 * Validates the handle and return the found session index or -EINVAL
 * when we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			/* a handle may only be used by the file that owns it */
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
626 | ||
/**
 * amdgpu_vce_cs_parse - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: index of the IB to parse
 *
 * Walks the VCE command stream, validating session handles and patching
 * buffer relocations.  On error every handle slot allocated during this
 * parse is released again; on success the destroyed handles are released.
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;		/* sessions destroyed in this IB */
	uint32_t created = 0;		/* sessions created in this IB */
	uint32_t allocated = 0;		/* handle slots newly allocated */
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r, idx = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	r = amdgpu_cs_sysvm_access_required(p);
	if (r)
		return r;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		/* commands are at least 8 bytes and dword aligned */
		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				/* destroy followed by create: reuse the slot */
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			/* image size: width * height * 8 * 3 / 2 bytes */
			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			/* only allowed on a few ASICs */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		/* the first command of an IB must be a session command */
		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}
793 | ||
/**
 * amdgpu_vce_cs_parse_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: index of the IB to parse
 *
 * Only validates session handles; no relocation patching is done since
 * addresses are already virtual in VM mode.
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;		/* sessions destroyed in this IB */
	uint32_t created = 0;		/* sessions created in this IB */
	uint32_t allocated = 0;		/* handle slots newly allocated */
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		/* commands are at least 8 bytes and dword aligned */
		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				/* destroy followed by create: reuse the slot */
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			/* all other commands need no validation in VM mode */
			break;
		}

		/* the first command of an IB must be a session command */
		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}
883 | ||
/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 * @vm_id: unused by this function
 * @ctx_switch: unused by this function
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
899 | ||
/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: GPU address to write the sequence number to
 * @seq: sequence number to write
 * @flags: fence flags; 64 bit sequence numbers are not supported
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
919 | ||
/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 * Emits an END command and polls for the read pointer to advance.
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	/* poll (up to usec_timeout microseconds) for the rptr to move */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
959 | ||
/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the destroy fence, in jiffies
 *
 * Submits a create and a destroy message and waits on the resulting fence.
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	/* direct submission; take the fence to wait on below */
	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	/* 0 = timeout, <0 = error, >0 = remaining jiffies (success) */
	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}