Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | |
3 | * Copyright 2008 Red Hat Inc. | |
4 | * Copyright 2009 Jerome Glisse. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
22 | * OTHER DEALINGS IN THE SOFTWARE. | |
23 | * | |
24 | * Authors: Dave Airlie | |
25 | * Alex Deucher | |
26 | * Jerome Glisse | |
27 | * Christian König | |
28 | */ | |
29 | #include <linux/seq_file.h> | |
30 | #include <linux/slab.h> | |
4f4824b5 | 31 | #include <linux/debugfs.h> |
d38ceaf9 AD |
32 | #include <drm/drmP.h> |
33 | #include <drm/amdgpu_drm.h> | |
34 | #include "amdgpu.h" | |
35 | #include "atom.h" | |
36 | ||
37 | /* | |
38 | * Rings | |
39 | * Most engines on the GPU are fed via ring buffers. Ring | |
40 | * buffers are areas of GPU accessible memory that the host | |
41 | * writes commands into and the GPU reads commands out of. | |
42 | * There is a rptr (read pointer) that determines where the | |
43 | * GPU is currently reading, and a wptr (write pointer) | |
44 | * which determines where the host has written. When the | |
45 | * pointers are equal, the ring is idle. When the host | |
46 | * writes commands to the ring buffer, it increments the | |
47 | * wptr. The GPU then starts fetching commands and executes | |
48 | * them until the pointers are equal again. | |
49 | */ | |
eb430969 CK |
50 | static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, |
51 | struct amdgpu_ring *ring); | |
a909c6bd | 52 | static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); |
d38ceaf9 | 53 | |
d38ceaf9 AD |
54 | /** |
55 | * amdgpu_ring_alloc - allocate space on the ring buffer | |
56 | * | |
57 | * @adev: amdgpu_device pointer | |
58 | * @ring: amdgpu_ring structure holding ring information | |
59 | * @ndw: number of dwords to allocate in the ring buffer | |
60 | * | |
61 | * Allocate @ndw dwords in the ring buffer (all asics). | |
62 | * Returns 0 on success, error on failure. | |
63 | */ | |
64 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) | |
65 | { | |
d38ceaf9 AD |
66 | /* Align requested size with padding so unlock_commit can |
67 | * pad safely */ | |
79887142 | 68 | ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; |
c7e6be23 CK |
69 | |
70 | /* Make sure we aren't trying to allocate more space | |
71 | * than the maximum for one submission | |
72 | */ | |
73 | if (WARN_ON_ONCE(ndw > ring->max_dw)) | |
74 | return -ENOMEM; | |
75 | ||
d38ceaf9 AD |
76 | ring->count_dw = ndw; |
77 | ring->wptr_old = ring->wptr; | |
f06505b8 CK |
78 | |
79 | if (ring->funcs->begin_use) | |
80 | ring->funcs->begin_use(ring); | |
81 | ||
d38ceaf9 AD |
82 | return 0; |
83 | } | |
84 | ||
edff0e28 JZ |
85 | /** amdgpu_ring_insert_nop - insert NOP packets |
86 | * | |
87 | * @ring: amdgpu_ring structure holding ring information | |
88 | * @count: the number of NOP packets to insert | |
89 | * | |
90 | * This is the generic insert_nop function for rings except SDMA | |
91 | */ | |
92 | void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |
93 | { | |
94 | int i; | |
95 | ||
96 | for (i = 0; i < count; i++) | |
79887142 | 97 | amdgpu_ring_write(ring, ring->funcs->nop); |
edff0e28 JZ |
98 | } |
99 | ||
9e5d5309 CK |
100 | /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets |
101 | * | |
102 | * @ring: amdgpu_ring structure holding ring information | |
103 | * @ib: IB to add NOP packets to | |
104 | * | |
105 | * This is the generic pad_ib function for rings except SDMA | |
106 | */ | |
107 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) | |
108 | { | |
79887142 CK |
109 | while (ib->length_dw & ring->funcs->align_mask) |
110 | ib->ptr[ib->length_dw++] = ring->funcs->nop; | |
9e5d5309 CK |
111 | } |
112 | ||
/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	/* Make sure all ring-buffer writes land before the GPU sees the
	 * new wptr. */
	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);

	/* KIQ is excluded from the LRU; it is not scheduled like the
	 * other rings. */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)
		amdgpu_ring_lru_touch(ring->adev, ring);
}
142 | ||
d38ceaf9 AD |
143 | /** |
144 | * amdgpu_ring_undo - reset the wptr | |
145 | * | |
146 | * @ring: amdgpu_ring structure holding ring information | |
147 | * | |
148 | * Reset the driver's copy of the wptr (all asics). | |
149 | */ | |
150 | void amdgpu_ring_undo(struct amdgpu_ring *ring) | |
151 | { | |
152 | ring->wptr = ring->wptr_old; | |
f06505b8 CK |
153 | |
154 | if (ring->funcs->end_use) | |
155 | ring->funcs->end_use(ring); | |
d38ceaf9 AD |
156 | } |
157 | ||
/**
 * amdgpu_ring_priority_put - restore a ring's priority
 *
 * @ring: amdgpu_ring structure holding the information
 * @priority: target priority
 *
 * Release a request for executing at @priority.  The per-priority
 * refcount taken by amdgpu_ring_priority_get() is dropped; when the
 * last job at this level completes, the ring's priority decays to the
 * highest level that still has jobs pending.
 */
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
			      enum drm_sched_priority priority)
{
	int i;

	/* Rings without hardware priority support are a no-op. */
	if (!ring->funcs->set_priority)
		return;

	/* Other jobs at this priority are still in flight. */
	if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
		return;

	/* no need to restore if the job is already at the lowest priority */
	if (priority == DRM_SCHED_PRIORITY_NORMAL)
		return;

	mutex_lock(&ring->priority_mutex);
	/* something higher prio is executing, no need to decay */
	if (ring->priority > priority)
		goto out_unlock;

	/* decay priority to the next level with a job available */
	for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		if (i == DRM_SCHED_PRIORITY_NORMAL
				|| atomic_read(&ring->num_jobs[i])) {
			ring->priority = i;
			ring->funcs->set_priority(ring, i);
			break;
		}
	}

out_unlock:
	mutex_unlock(&ring->priority_mutex);
}
199 | ||
200 | /** | |
201 | * amdgpu_ring_priority_get - change the ring's priority | |
202 | * | |
203 | * @ring: amdgpu_ring structure holding the information | |
204 | * @priority: target priority | |
205 | * | |
206 | * Request a ring's priority to be raised to @priority (refcounted). | |
207 | */ | |
208 | void amdgpu_ring_priority_get(struct amdgpu_ring *ring, | |
1b1f42d8 | 209 | enum drm_sched_priority priority) |
b2ff0e8a AR |
210 | { |
211 | if (!ring->funcs->set_priority) | |
212 | return; | |
213 | ||
214 | atomic_inc(&ring->num_jobs[priority]); | |
215 | ||
216 | mutex_lock(&ring->priority_mutex); | |
217 | if (priority <= ring->priority) | |
218 | goto out_unlock; | |
219 | ||
220 | ring->priority = priority; | |
221 | ring->funcs->set_priority(ring, priority); | |
222 | ||
223 | out_unlock: | |
224 | mutex_unlock(&ring->priority_mutex); | |
225 | } | |
226 | ||
/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source used for fence signalling on this ring
 * @irq_type: interrupt type used for fence signalling on this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned irq_type)
{
	int r, i;
	int sched_hw_submission = amdgpu_sched_hw_submission;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);

	/* First-time init: register the ring with the device. */
	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
		if (r)
			return r;
	}

	/* Writeback slots for rptr/wptr/fence/cond-exec polling. */
	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}
	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	/* Size the ring for sched_hw_submission concurrent submissions of
	 * up to max_dw dwords each, rounded to a power of two. */
	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
	mutex_init(&ring->priority_mutex);
	INIT_LIST_HEAD(&ring->lru_list);
	amdgpu_ring_lru_touch(adev, ring);

	for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
		atomic_set(&ring->num_jobs[i], 0);

	/* Debugfs failure is non-fatal; the ring still works. */
	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}

	return 0;
}
334 | ||
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	ring->ready = false;

	/* Not to finish a ring which is not initialized */
	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
		return;

	/* Release the writeback slots taken in amdgpu_ring_init(). */
	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_device_wb_free(ring->adev, ring->fence_offs);

	/* Free the ring buffer BO and its CPU mapping. */
	amdgpu_bo_free_kernel(&ring->ring_obj,
			      &ring->gpu_addr,
			      (void **)&ring->ring);

	amdgpu_debugfs_ring_fini(ring);

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;

	/* Unregister from the device so the init guard above fires on a
	 * second fini. */
	ring->adev->rings[ring->idx] = NULL;
}
368 | ||
/* Move @ring to the MRU end of the device LRU list; caller must hold
 * adev->ring_lru_list_lock. */
static void amdgpu_ring_lru_touch_locked(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring)
{
	/* list_move_tail handles the case where ring isn't part of the list */
	list_move_tail(&ring->lru_list, &adev->ring_lru_list);
}
375 | ||
6065343a AR |
376 | static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring, |
377 | int *blacklist, int num_blacklist) | |
378 | { | |
379 | int i; | |
380 | ||
381 | for (i = 0; i < num_blacklist; i++) { | |
382 | if (ring->idx == blacklist[i]) | |
383 | return true; | |
384 | } | |
385 | ||
386 | return false; | |
387 | } | |
388 | ||
/**
 * amdgpu_ring_lru_get - get the least recently used ring for a HW IP block
 *
 * @adev: amdgpu_device pointer
 * @type: amdgpu_ring_type enum
 * @blacklist: blacklisted ring ids array
 * @num_blacklist: number of entries in @blacklist
 * @lru_pipe_order: find a ring from the least recently used pipe
 * @ring: output ring
 *
 * Retrieve the amdgpu_ring structure for the least recently used ring of
 * a specific IP block (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
			int *blacklist, int num_blacklist,
			bool lru_pipe_order, struct amdgpu_ring **ring)
{
	struct amdgpu_ring *entry;

	/* List is sorted in LRU order, find first entry corresponding
	 * to the desired HW IP */
	*ring = NULL;
	spin_lock(&adev->ring_lru_list_lock);
	list_for_each_entry(entry, &adev->ring_lru_list, lru_list) {
		if (entry->funcs->type != type)
			continue;

		if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
			continue;

		if (!*ring) {
			*ring = entry;

			/* We are done for ring LRU */
			if (!lru_pipe_order)
				break;
		}

		/* Move all rings on the same pipe to the end of the list */
		if (entry->pipe == (*ring)->pipe)
			amdgpu_ring_lru_touch_locked(adev, entry);
	}

	/* Move the ring we found to the end of the list */
	if (*ring)
		amdgpu_ring_lru_touch_locked(adev, *ring);

	spin_unlock(&adev->ring_lru_list_lock);

	if (!*ring) {
		DRM_ERROR("Ring LRU contains no entries for ring type:%d\n", type);
		return -EINVAL;
	}

	return 0;
}
446 | ||
/**
 * amdgpu_ring_lru_touch - mark a ring as recently being used
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to touch
 *
 * Move @ring to the tail of the lru list
 */
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	spin_lock(&adev->ring_lru_list_lock);
	amdgpu_ring_lru_touch_locked(adev, ring);
	spin_unlock(&adev->ring_lru_list_lock);
}
461 | ||
/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to emit the packets on
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.  Emits a register write of @ref to @reg0,
 * then a wait until (@reg1 & @mask) matches (@ref & @mask).
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
481 | ||
d38ceaf9 AD |
482 | /* |
483 | * Debugfs info | |
484 | */ | |
485 | #if defined(CONFIG_DEBUG_FS) | |
486 | ||
/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	/* Only dword-aligned offsets and sizes are supported. */
	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	/* First 12 bytes: the three pointer snapshots. */
	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	/* Remaining bytes: the ring contents, offset by the 12-byte header. */
	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12)/4];
		r = put_user(value, (uint32_t*)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}
537 | ||
/* Read-only debugfs file exposing ring pointers and contents; the
 * layout is described above amdgpu_debugfs_ring_read(). */
static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};
d38ceaf9 AD |
543 | |
544 | #endif | |
545 | ||
/* Create the per-ring debugfs file "amdgpu_ring_<name>" under the DRM
 * primary minor's debugfs root.  Returns 0 on success (or when debugfs
 * is compiled out), -ENOMEM if the file could not be created. */
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	char name[32];

	/* Bounded write: snprintf guarantees NUL termination and never
	 * overruns name[], even for unexpectedly long ring names
	 * (sprintf here could overflow the 32-byte buffer). */
	snprintf(name, sizeof(name), "amdgpu_ring_%s", ring->name);

	ent = debugfs_create_file(name,
				  S_IFREG | S_IRUGO, root,
				  ring, &amdgpu_debugfs_ring_fops);
	if (!ent)
		return -ENOMEM;

	/* 12-byte pointer header plus the ring contents; must match the
	 * layout produced by amdgpu_debugfs_ring_read(). */
	i_size_write(ent->d_inode, ring->ring_size + 12);
	ring->ent = ent;
#endif
	return 0;
}
/* Remove the debugfs file created by amdgpu_debugfs_ring_init();
 * no-op when debugfs is compiled out. */
static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove(ring->ent);
#endif
}