/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/kref.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"

/*
 * Fences mark an event in the GPUs pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */

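/*
 * A rough sketch of the bookkeeping used below, drawn from the code in this
 * file rather than from separate documentation: each ring has a 64-bit
 * driver-side sequence counter (fence_drv[ring].sync_seq[]) that is bumped
 * for every emitted fence, while the hardware only ever writes back the low
 * 32 bits. radeon_fence_activity() re-extends the value read back from the
 * hardware to 64 bits and caches it in fence_drv[ring].last_seq, so a fence
 * counts as signaled once last_seq >= fence->seq.
 */
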
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr)
			*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr)
			seq = le32_to_cpu(*drv->cpu_addr);
		else
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}

d66b7ec2 AD |
123 | /** |
124 | * radeon_fence_emit - emit a fence on the requested ring | |
125 | * | |
126 | * @rdev: radeon_device pointer | |
127 | * @fence: radeon fence object | |
128 | * @ring: ring index the fence is associated with | |
129 | * | |
130 | * Emits a fence command on the requested ring (all asics). | |
131 | * Returns 0 on success, -ENOMEM on failure. | |
132 | */ | |
876dc9f3 CK |
133 | int radeon_fence_emit(struct radeon_device *rdev, |
134 | struct radeon_fence **fence, | |
135 | int ring) | |
771fe6b9 | 136 | { |
b24c683a | 137 | u64 seq; |
954605ca | 138 | |
3b7a2b24 | 139 | /* we are protected by the ring emission mutex */ |
876dc9f3 | 140 | *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL); |
01175474 | 141 | if ((*fence) == NULL) |
876dc9f3 | 142 | return -ENOMEM; |
01175474 | 143 | |
876dc9f3 | 144 | (*fence)->rdev = rdev; |
b24c683a | 145 | (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring]; |
876dc9f3 | 146 | (*fence)->ring = ring; |
ad1a58a4 | 147 | (*fence)->is_vm_update = false; |
f54d1867 CW |
148 | dma_fence_init(&(*fence)->base, &radeon_fence_ops, |
149 | &rdev->fence_queue.lock, | |
150 | rdev->fence_context + ring, | |
151 | seq); | |
876dc9f3 | 152 | radeon_fence_ring_emit(rdev, ring, *fence); |
fb1b5e1d | 153 | trace_radeon_fence_emit(rdev_to_drm(rdev), ring, (*fence)->seq); |
0bfa4b41 | 154 | radeon_fence_schedule_check(rdev, ring); |
771fe6b9 JG |
155 | return 0; |
156 | } | |
157 | ||
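/*
 * Typical emit/wait/release flow, as a minimal illustrative sketch only (the
 * ring index and error handling are assumptions, not a verbatim snippet from
 * the driver):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (r)
 *		return r;
 *	r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */
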
/*
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_entry_t *wait,
				       unsigned int mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		dma_fence_signal_locked(&fence->base);
		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		dma_fence_put(&fence->base);
	}
	return 0;
}

/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned int count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there need to
	 * be continuously new fences signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and
	 * bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}
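		/* Illustrative numbers only: with last_seq 0x1fffffff0 and a
		 * 32-bit readback of 0x5, the plain OR above would give
		 * 0x100000005, which is below last_seq; the wrap-around path
		 * then borrows the upper bits of last_emitted (say
		 * 0x200000010) instead, giving 0x200000005.
		 */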

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to see whether a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
	ring = fence_drv - &rdev->fence_drv[0];

	if (!down_read_trylock(&rdev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (fence_drv->delayed_irq && rdev->irq.installed) {
		unsigned long irqflags;

		fence_drv->delayed_irq = false;
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}

	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);

	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {

		/* good news we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id 0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	up_read(&rdev->exclusive_lock);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned int ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	return false;
}

static bool radeon_fence_is_signaled(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	unsigned int ring = fence->ring;
	u64 seq = fence->seq;

	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_fence_process(rdev, ring);
		up_read(&rdev->exclusive_lock);

		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
			return true;
	}
	return false;
}

/**
 * radeon_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;

	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
		return false;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);

		if (radeon_fence_activity(rdev, fence->ring))
			wake_up_all_locked(&rdev->fence_queue);

		/* did fence get signaled after we enabled the sw irq? */
		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
			up_read(&rdev->exclusive_lock);
			return false;
		}

		up_read(&rdev->exclusive_lock);
	} else {
		/* we're probably in a lockup, let's not fiddle too much */
		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
			rdev->fence_drv[fence->ring].delayed_irq = true;
		radeon_fence_schedule_check(rdev, fence->ring);
	}

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = radeon_fence_check_signaled;
	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
	dma_fence_get(f);
	return true;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;

	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		dma_fence_signal(&fence->base);
		return true;
	}
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait times out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		trace_radeon_fence_wait_begin(rdev_to_drm(rdev), i, target_seq[i]);
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
		trace_radeon_fence_wait_end(rdev_to_drm(rdev), i, target_seq[i]);
	}

	return r;
}

/**
 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait times out, or an error for all other cases.
 */
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	/*
	 * This function should not be called on !radeon fences.
	 * If this is the case, it would mean this function can
	 * also be called on radeon fences belonging to another card.
	 * exclusive_lock is not held in that case.
	 */
	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
		return dma_fence_wait(&fence->base, intr);

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
	if (r <= 0)
		return r;

	dma_fence_signal(&fence->base);
	return r;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

	if (r > 0)
		return 0;
	else
		return r;
}

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence
		 */
		return -ENOENT;
	}

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	return 0;
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	dma_fence_get(&fence->base);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		dma_fence_put(&tmp->base);
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32-bit wrap-around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned int)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring])
		return false;

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned int i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

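/*
 * How the two helpers above fit together, as a hedged sketch only (the
 * actual semaphore emission is ring/asic specific and elided here):
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		... emit a semaphore wait on dst_ring for fence->seq ...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */
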
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016llx\n",
		 ring, rdev->fence_drv[ring].gpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 */
void radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++)
		radeon_fence_driver_init_ring(rdev, ring);

	radeon_debugfs_fence_init(rdev);
}

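/*
 * Initialization order, restating the kerneldoc above in one place:
 * radeon_fence_driver_init() runs once at device init and only sets up
 * software state; the asic-specific startup code is then expected to call
 * radeon_fence_driver_start_ring() for each ring it actually has, which
 * picks the writeback slot or scratch register backing the fence value.
 */
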
/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

883 | /** |
884 | * radeon_fence_driver_force_completion - force all fence waiter to complete | |
885 | * | |
886 | * @rdev: radeon device pointer | |
eb98c709 | 887 | * @ring: the ring to complete |
76903b96 JG |
888 | * |
889 | * In case of GPU reset failure make sure no process keep waiting on fence | |
890 | * that will never complete. | |
891 | */ | |
eb98c709 | 892 | void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring) |
76903b96 | 893 | { |
0bfa4b41 | 894 | if (rdev->fence_drv[ring].initialized) { |
76903b96 | 895 | radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring); |
0bfa4b41 CK |
896 | cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work); |
897 | } | |
76903b96 JG |
898 | } |
899 | ||
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info_show(struct seq_file *m, void *data)
{
	struct radeon_device *rdev = m->private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/*
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(void *data, u64 *val)
{
	struct radeon_device *rdev = (struct radeon_device *)data;

	down_read(&rdev->exclusive_lock);
	*val = rdev->needs_reset;
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(radeon_debugfs_gpu_reset_fops,
			 radeon_debugfs_gpu_reset, NULL, "%lld\n");
#endif

void radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;

	debugfs_create_file("radeon_gpu_reset", 0444, root, rdev,
			    &radeon_debugfs_gpu_reset_fops);
	debugfs_create_file("radeon_fence_info", 0444, root, rdev,
			    &radeon_debugfs_fence_info_fops);

#endif
}

static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
	return "radeon";
}

static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);

	switch (fence->ring) {
	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
	default:
		WARN_ON_ONCE(1);
		return "radeon.unk";
	}
}

static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct radeon_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct radeon_wait_cb *wait =
		container_of(cb, struct radeon_wait_cb, base);

	wake_up_process(wait->task);
}

static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
					     signed long t)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	struct radeon_wait_cb cb;

	cb.task = current;

	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
		return t;

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * radeon_test_signaled must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (radeon_test_signaled(fence))
			break;

		if (rdev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(f, &cb.base);

	return t;
}

const struct dma_fence_ops radeon_fence_ops = {
	.get_driver_name = radeon_fence_get_driver_name,
	.get_timeline_name = radeon_fence_get_timeline_name,
	.enable_signaling = radeon_fence_enable_signaling,
	.signaled = radeon_fence_is_signaled,
	.wait = radeon_fence_default_wait,
	.release = NULL,
};