/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0

/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - rdev->gart_pin_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     0, NULL, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

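		/* Copy the pointer pattern from GTT to VRAM on the ring under test */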
		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.resv);
		else
			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

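		/* Copy the VRAM-side pattern back into the GTT object */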
		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.resv);
		else
			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

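		/* Verify the round trip: each GTT slot must now hold a VRAM address */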
		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence && !IS_ERR(fence))
			radeon_fence_unref(&fence);
		break;
	}

	radeon_bo_unpin(vram_obj);
out_unres:
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}

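/* Exercise BO moves with every copy method the ASIC provides */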
void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		r = radeon_fence_emit(rdev, fence, ring->idx);
		if (r) {
			DRM_ERROR("Failed to emit fence\n");
			radeon_ring_unlock_undo(rdev, ring);
			return r;
		}
		radeon_ring_unlock_commit(rdev, ring, false);
	}
	return 0;
}

void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

static void radeon_test_ring_sync2(struct radeon_device *rdev,
				   struct radeon_ring *ringA,
				   struct radeon_ring *ringB,
				   struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %d\n", ringC->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %d\n", ringC->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	mdelay(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

static bool radeon_test_sync_possible(struct radeon_ring *ringA,
				      struct radeon_ring *ringB)
{
	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
		return false;

	return true;
}

void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}