/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
        u32 pg_idx, pg_offset;
        u32 idx_value = 0;
        int new_page;

        pg_idx = (idx * 4) / PAGE_SIZE;
        pg_offset = (idx * 4) % PAGE_SIZE;

        if (ibc->kpage_idx[0] == pg_idx)
                return ibc->kpage[0][pg_offset/4];
        if (ibc->kpage_idx[1] == pg_idx)
                return ibc->kpage[1][pg_offset/4];

        new_page = radeon_cs_update_pages(p, pg_idx);
        if (new_page < 0) {
                p->parser_error = new_page;
                return 0;
        }

        idx_value = ibc->kpage[new_page][pg_offset/4];
        return idx_value;
}
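
/* Worked example of the paging arithmetic above, assuming PAGE_SIZE is
 * 4096 (1024 dwords per page): for idx = 3000, idx * 4 = 12000, so
 * pg_idx = 12000 / 4096 = 2 and pg_offset = 12000 % 4096 = 3808, i.e.
 * dword 3808 / 4 = 952 of the third page (2 * 1024 + 952 == 3000).
 * The two kpage slots act as a tiny cache so that mostly-sequential
 * parsing rarely has to call radeon_cs_update_pages().
 */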

void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
        if (ring->count_dw <= 0) {
                DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
        }
#endif
        ring->ring[ring->wptr++] = v;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw--;
        ring->ring_free_dw--;
}
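
/* Example of the wrap-around above: ptr_mask is ring_size / 4 - 1, so on
 * a 4KB ring (1024 dwords, ptr_mask = 1023) a write at wptr = 1023 stores
 * the dword and then wraps wptr to 1024 & 1023 = 0. This only works
 * because the ring size is a power of two.
 */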

/*
 * IB.
 */
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
        bool done = false;

        /* only free ibs which have been emitted */
        if (ib->fence && ib->fence->emitted) {
                if (radeon_fence_signaled(ib->fence)) {
                        radeon_fence_unref(&ib->fence);
                        radeon_sa_bo_free(rdev, &ib->sa_bo);
                        done = true;
                }
        }
        return done;
}

int radeon_ib_get(struct radeon_device *rdev, int ring,
                  struct radeon_ib **ib, unsigned size)
{
        struct radeon_fence *fence;
        unsigned cretry = 0;
        int r = 0, i, idx;

        *ib = NULL;
        /* align size on 256 bytes */
        size = ALIGN(size, 256);

        r = radeon_fence_create(rdev, &fence, ring);
        if (r) {
                dev_err(rdev->dev, "failed to create fence for new IB\n");
                return r;
        }

        mutex_lock(&rdev->ib_pool.mutex);
        idx = rdev->ib_pool.head_id;
retry:
        if (cretry > 5) {
                dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
                mutex_unlock(&rdev->ib_pool.mutex);
                radeon_fence_unref(&fence);
                return -ENOMEM;
        }
        cretry++;
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
                if (rdev->ib_pool.ibs[idx].fence == NULL) {
                        r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
                                             &rdev->ib_pool.ibs[idx].sa_bo,
                                             size, 256);
                        if (!r) {
                                *ib = &rdev->ib_pool.ibs[idx];
                                (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
                                (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
                                (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
                                (*ib)->gpu_addr += (*ib)->sa_bo.offset;
                                (*ib)->fence = fence;
                                /* ibs are most likely to be allocated in a
                                 * ring fashion, thus rdev->ib_pool.head_id
                                 * should be the id of the oldest ib
                                 */
                                rdev->ib_pool.head_id = (1 + idx);
                                rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
                                mutex_unlock(&rdev->ib_pool.mutex);
                                return 0;
                        }
                }
                idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        }
        /* this should be a rare event, i.e. all ibs are scheduled but none
         * have signaled yet.
         */
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
                        r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
                        if (!r) {
                                goto retry;
                        }
                        /* an error happened */
                        break;
                }
                idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        }
        mutex_unlock(&rdev->ib_pool.mutex);
        radeon_fence_unref(&fence);
        return r;
}
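
/* Note on the address math in radeon_ib_get() above: sa_bo.offset is a
 * byte offset into the suballocated buffer, so the CPU pointer (a u32
 * pointer) advances by offset >> 2 while gpu_addr advances by the raw
 * byte offset. E.g. an ib at offset 0x1000 starts at cpu_ptr + 0x400
 * dwords and at gpu_addr + 0x1000 bytes, which name the same memory.
 */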

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
        struct radeon_ib *tmp = *ib;

        *ib = NULL;
        if (tmp == NULL) {
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        if (tmp->fence && !tmp->fence->emitted) {
                radeon_sa_bo_free(rdev, &tmp->sa_bo);
                radeon_fence_unref(&tmp->fence);
        }
        mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
        int r = 0;

        if (!ib->length_dw || !ring->ready) {
                /* TODO: nothing in the ib; should we report this? */
                DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
                return -EINVAL;
        }

        /* 64 dwords should be enough for the fence too */
        r = radeon_ring_lock(rdev, ring, 64);
        if (r) {
                DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
                return r;
        }
        radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
        radeon_fence_emit(rdev, ib->fence);
        radeon_ring_unlock_commit(rdev, ring);
        return 0;
}
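
/* A typical caller (e.g. the CS ioctl path in radeon_cs.c) does roughly:
 *
 *      r = radeon_ib_get(rdev, ring, &ib, size);
 *      ... copy packets into ib->ptr and set ib->length_dw ...
 *      r = radeon_ib_schedule(rdev, ib);
 *      radeon_ib_free(rdev, &ib);
 *
 * radeon_ib_free() is safe to call right after scheduling: once the fence
 * has been emitted it leaves the ib alone, and the pool only recycles it
 * after the fence signals (see radeon_ib_try_free()).
 */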

int radeon_ib_pool_init(struct radeon_device *rdev)
{
        int i, r;

        mutex_lock(&rdev->ib_pool.mutex);
        if (rdev->ib_pool.ready) {
                mutex_unlock(&rdev->ib_pool.mutex);
                return 0;
        }

        r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
                                      RADEON_IB_POOL_SIZE*64*1024,
                                      RADEON_GEM_DOMAIN_GTT);
        if (r) {
                mutex_unlock(&rdev->ib_pool.mutex);
                return r;
        }

        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                rdev->ib_pool.ibs[i].fence = NULL;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
                INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
        }
        rdev->ib_pool.head_id = 0;
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");

        if (radeon_debugfs_ib_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for IB!\n");
        }
        if (radeon_debugfs_ring_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for rings!\n");
        }
        mutex_unlock(&rdev->ib_pool.mutex);
        return 0;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
        unsigned i;

        mutex_lock(&rdev->ib_pool.mutex);
        if (rdev->ib_pool.ready) {
                for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                        radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
                        radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
                }
                radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
                rdev->ib_pool.ready = false;
        }
        mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_pool_start(struct radeon_device *rdev)
{
        return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
        return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

/*
 * Ring.
 */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
        /* r1xx-r5xx only has CP ring */
        if (rdev->family < CHIP_R600)
                return RADEON_RING_TYPE_GFX_INDEX;

        if (rdev->family >= CHIP_CAYMAN) {
                if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
                        return CAYMAN_RING_TYPE_CP1_INDEX;
                else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
                        return CAYMAN_RING_TYPE_CP2_INDEX;
        }
        return RADEON_RING_TYPE_GFX_INDEX;
}

void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 rptr;

        if (rdev->wb.enabled)
                rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
        else
                rptr = RREG32(ring->rptr_reg);
        ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
        /* This works because ring_size is a power of 2 */
        ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
        ring->ring_free_dw -= ring->wptr;
        ring->ring_free_dw &= ring->ptr_mask;
        if (!ring->ring_free_dw) {
                ring->ring_free_dw = ring->ring_size / 4;
        }
}
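
/* Worked example of the free-space computation above, for a 4KB ring
 * (ring_size / 4 = 1024 dwords, ptr_mask = 1023): with rptr = 100 and
 * wptr = 900, ring_free_dw = (100 + 1024 - 900) & 1023 = 224. A result
 * of 0 is ambiguous (completely empty or completely full) and is read
 * as empty here, which is safe because radeon_ring_alloc() never lets
 * the ring fill up all the way (it always leaves at least one free
 * dword).
 */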

int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        /* Align requested size with padding so unlock_commit can
         * pad safely */
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
        while (ndw > (ring->ring_free_dw - 1)) {
                radeon_ring_free_size(rdev, ring);
                if (ndw < ring->ring_free_dw) {
                        break;
                }
                r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
                if (r)
                        return r;
        }
        ring->count_dw = ndw;
        ring->wptr_old = ring->wptr;
        return 0;
}
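
/* The round-up above is standard power-of-two alignment: with
 * align_mask = 15 (a 16-dword fetch size), ndw = 70 becomes
 * (70 + 15) & ~15 = 80, so unlock_commit can always pad to a full
 * fetch-size boundary without overrunning the space reserved here.
 */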

int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        mutex_lock(&ring->mutex);
        r = radeon_ring_alloc(rdev, ring, ndw);
        if (r) {
                mutex_unlock(&ring->mutex);
                return r;
        }
        return 0;
}

void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
        unsigned count_dw_pad;
        unsigned i;

        /* We pad to match fetch size */
        count_dw_pad = (ring->align_mask + 1) -
                       (ring->wptr & ring->align_mask);
        for (i = 0; i < count_dw_pad; i++) {
                radeon_ring_write(ring, ring->nop);
        }
        DRM_MEMORYBARRIER();    /* order the ring writes before the wptr update */
        WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
        (void)RREG32(ring->wptr_reg);   /* posting read to flush the write */
}
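
/* Padding example for radeon_ring_commit() above: with align_mask = 15
 * and wptr = 0x22, count_dw_pad = 16 - (0x22 & 15) = 14 nops, bringing
 * wptr to 0x30. Note that an already aligned wptr pads a full group of
 * align_mask + 1 nops rather than none.
 */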

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
        radeon_ring_commit(rdev, ring);
        mutex_unlock(&ring->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
        ring->wptr = ring->wptr_old;
        mutex_unlock(&ring->mutex);
}

int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
                     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
                     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
        int r;

        ring->ring_size = ring_size;
        ring->rptr_offs = rptr_offs;
        ring->rptr_reg = rptr_reg;
        ring->wptr_reg = wptr_reg;
        ring->ptr_reg_shift = ptr_reg_shift;
        ring->ptr_reg_mask = ptr_reg_mask;
        ring->nop = nop;
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT,
                                     &ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                r = radeon_bo_reserve(ring->ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
                                  &ring->gpu_addr);
                if (r) {
                        radeon_bo_unreserve(ring->ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
                r = radeon_bo_kmap(ring->ring_obj,
                                   (void **)&ring->ring);
                radeon_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
        ring->ptr_mask = (ring->ring_size / 4) - 1;
        ring->ring_free_dw = ring->ring_size / 4;
        return 0;
}
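
/* ptr_mask above relies on ring_size being a power of two: e.g. a 64KB
 * ring holds 16384 dwords, so ptr_mask = 0x3fff and "wptr & ptr_mask"
 * wraps indices for free. Callers are expected to pass a power-of-two
 * ring_size; nothing here validates that.
 */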

void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
        int r;
        struct radeon_bo *ring_obj;

        mutex_lock(&ring->mutex);
        ring_obj = ring->ring_obj;
        ring->ring = NULL;
        ring->ring_obj = NULL;
        mutex_unlock(&ring->mutex);

        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(ring_obj);
                        radeon_bo_unpin(ring_obj);
                        radeon_bo_unreserve(ring_obj);
                }
                radeon_bo_unref(&ring_obj);
        }
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int ridx = *(int*)node->info_ent->data;
        struct radeon_ring *ring = &rdev->ring[ridx];
        unsigned count, i, j;

        radeon_ring_free_size(rdev, ring);
        count = (ring->ring_size / 4) - ring->ring_free_dw;
        seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
        seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
        seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
        seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
        i = ring->rptr;
        for (j = 0; j <= count; j++) {
                seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
                i = (i + 1) & ring->ptr_mask;
        }
        return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
        {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
        {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
        {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct radeon_ib *ib = node->info_ent->data;
        unsigned i;

        if (ib == NULL) {
                return 0;
        }
        seq_printf(m, "IB %04u\n", ib->idx);
        seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
        return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
                                        ARRAY_SIZE(radeon_debugfs_ring_info_list));
#else
        return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;

        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
                radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
                radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
                radeon_debugfs_ib_list[i].driver_features = 0;
                radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
        }
        return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
                                        RADEON_IB_POOL_SIZE);
#else
        return 0;
#endif
}