/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

/*
 * IB.
 */
int radeon_debugfs_sa_init(struct radeon_device *rdev);

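/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 * @rdev: radeon_device pointer
 * @ring: index of the ring the IB is associated with
 * @ib: IB object to fill in
 * @size: requested IB size in bytes
 *
 * Allocates the IB memory from the suballocator (256-byte aligned) and
 * initializes the IB bookkeeping fields. Returns 0 on success, or a
 * negative error code if the suballocation fails.
 */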
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, unsigned size)
{
	int r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	ib->vm_id = 0;
	ib->is_const_ib = false;
	ib->semaphore = NULL;

	return 0;
}

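/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Releases the semaphore and the suballocation once the IB's fence has
 * signaled, then drops the fence reference.
 */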
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}

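/**
 * radeon_ib_schedule - schedule an IB on its associated ring
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Locks the ring, asks the backend to execute the IB, and emits a fence
 * that signals when the IB has been processed. Returns 0 on success, or a
 * negative error code if the ring is not ready or could not be locked.
 */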
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing in the ib we should report. */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

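/**
 * radeon_ib_pool_init - set up the IB suballocator pool
 * @rdev: radeon_device pointer
 *
 * Initializes the suballocator that backs all IBs (one GTT buffer for the
 * whole pool) and registers the SA debugfs file. Safe to call more than
 * once. Returns 0 on success, negative error code on failure.
 */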
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}
	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}
	rdev->ib_pool_ready = true;
	if (radeon_debugfs_sa_init(rdev)) {
		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

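/**
 * radeon_ib_pool_fini - tear down the IB suballocator pool
 * @rdev: radeon_device pointer
 *
 * Frees the suballocator backing the IB pool, if it was initialized.
 */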
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}

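/**
 * radeon_ib_pool_start - start the IB pool
 * @rdev: radeon_device pointer
 *
 * Starts the suballocator that backs the pool so IBs can be allocated,
 * presumably across suspend/resume cycles.
 */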
int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
}

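/**
 * radeon_ib_pool_suspend - suspend the IB pool
 * @rdev: radeon_device pointer
 *
 * Suspends the suballocator that backs the pool, presumably before the
 * device is suspended or reset.
 */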
int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
}

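/**
 * radeon_ib_ring_tests - run a simple IB test on every ready ring
 * @rdev: radeon_device pointer
 *
 * Marks a ring as not ready if its IB test fails. A failure on the GFX
 * ring is fatal for acceleration, so accel_working is cleared and the
 * error is returned; failures on other rings are only logged.
 */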
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Ring.
 */
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

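/**
 * radeon_ring_write - write one dword to the ring buffer
 * @ring: radeon_ring structure holding ring information
 * @v: dword to write
 *
 * Stores @v at the current write pointer, wraps the pointer using the
 * power-of-two ring mask, and updates the free-space accounting.
 */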
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

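/**
 * radeon_ring_index - map a ring structure back to its index
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Returns the index of @ring inside rdev->ring. Pre-R600 parts only have
 * the CP (GFX) ring; Cayman and newer add the CP1/CP2 compute rings.
 */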
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

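/**
 * radeon_ring_free_size - recompute the free space on a ring
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Reads the current read pointer (from the writeback page when enabled,
 * from the register otherwise) and updates ring_free_dw, the number of
 * dwords the driver may still write before catching up with the GPU.
 */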
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

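/**
 * radeon_ring_alloc - make room on a ring for @ndw dwords
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate
 *
 * Rounds @ndw up to the ring's alignment and waits on fences until there
 * is enough free space. The caller must hold the ring lock; use
 * radeon_ring_lock() otherwise. Returns 0 on success, or a negative error
 * code if waiting on a fence fails.
 */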
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

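/**
 * radeon_ring_lock - take the ring lock and allocate space on the ring
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate
 *
 * Grabs the per-device ring mutex and then allocates @ndw dwords; the
 * mutex is released again if the allocation fails.
 */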
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&rdev->ring_lock);
		return r;
	}
	return 0;
}

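/**
 * radeon_ring_commit - tell the GPU to execute the new ring contents
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Pads the ring with NOPs up to the fetch-size boundary and then updates
 * the hardware write pointer; the read back of wptr_reg presumably acts
 * as a posting read that flushes the MMIO write.
 */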
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

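/**
 * radeon_ring_unlock_commit - commit the ring contents and drop the lock
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Counterpart of radeon_ring_lock() for the success path.
 */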
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&rdev->ring_lock);
}

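/**
 * radeon_ring_undo - discard uncommitted ring writes
 * @ring: radeon_ring structure holding ring information
 *
 * Rewinds the write pointer to the value saved by radeon_ring_alloc(),
 * throwing away everything written since then.
 */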
void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

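/**
 * radeon_ring_unlock_undo - discard uncommitted writes and drop the lock
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Counterpart of radeon_ring_lock() for the error path.
 */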
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}

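/**
 * radeon_ring_force_activity - add a NOP if the ring has gone idle
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Submits a single NOP packet when rptr has caught up with wptr, so that
 * lockup detection can distinguish a hung CP from an idle one.
 */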
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
}

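/**
 * radeon_ring_lockup_update - refresh the lockup tracking information
 * @ring: radeon_ring structure holding ring information
 *
 * Records the current rptr and jiffies as the last observed activity.
 */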
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = jiffies;
}

/**
 * radeon_ring_test_lockup() - check if the ring is locked up
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * We don't need to initialize the lockup tracking information explicitly,
 * as either the CP rptr will have moved or jiffies will have wrapped
 * around, and both cases force a refresh of the tracking information.
 *
 * If the rptr is still advancing, or on a suspected jiffies wrap-around,
 * the tracking information is updated and false is returned. A lockup is
 * only reported once the rptr has been stuck for longer than the
 * radeon_lockup_timeout module parameter (in msec); a value of 0 disables
 * the check entirely.
 *
 * The caller should write to the ring to force the CP to do something, so
 * we don't get a false positive when the CP simply has nothing to do.
 **/
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;
	uint32_t rptr;

	cjiffies = jiffies;
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}

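/**
 * radeon_ring_init - initialize a driver ring structure
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring buffer in bytes (must be a power of two)
 * @rptr_offs: offset of the rptr copy in the writeback page
 * @rptr_reg: MMIO register holding the read pointer
 * @wptr_reg: MMIO register holding the write pointer
 * @ptr_reg_shift: bit shift applied to the pointer registers
 * @ptr_reg_mask: bit mask applied to the pointer registers
 * @nop: NOP packet value used for padding
 *
 * Allocates, pins and maps the ring buffer object in GTT (on first use)
 * and registers the ring's debugfs file. Returns 0 on success, negative
 * error code on failure.
 */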
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	return 0;
}

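/**
 * radeon_ring_fini - tear down a driver ring structure
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Detaches the ring buffer object under the ring lock, then unmaps,
 * unpins and releases it outside the lock.
 */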
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&rdev->ring_lock);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

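/* Dump a ring's pointers and its not-yet-consumed dwords to debugfs. */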
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

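/* Dump the state of the suballocator backing the IB pool to debugfs. */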
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

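/* Register the debugfs file matching @ring, if any. */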
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		unsigned r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
	return 0;
#endif
}