drivers/gpu/drm/radeon/radeon_fence.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/kref.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"

/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or a memory location depends on the asic
 * and whether writeback is enabled.
 */
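
/*
 * Typical fence lifecycle (illustrative sketch only, not driver code;
 * error handling is trimmed and the ring emission lock is assumed to be
 * held by the caller, as radeon_fence_emit() requires):
 *
 *      struct radeon_fence *fence;
 *      int r;
 *
 *      r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *      if (r)
 *              return r;
 *      ...                                     // GPU works through the ring
 *      r = radeon_fence_wait(fence, false);    // block until it signals
 *      radeon_fence_unref(&fence);             // drop our reference
 */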

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
                if (drv->cpu_addr)
                        *drv->cpu_addr = cpu_to_le32(seq);
        } else {
                WREG32(drv->scratch_reg, seq);
        }
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        u32 seq = 0;

        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
                if (drv->cpu_addr)
                        seq = le32_to_cpu(*drv->cpu_addr);
                else
                        seq = lower_32_bits(atomic64_read(&drv->last_seq));
        } else {
                seq = RREG32(drv->scratch_reg);
        }
        return seq;
}

/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
        /*
         * Do not reset the timer here with mod_delayed_work,
         * this can livelock in an interaction with TTM delayed destroy.
         */
        queue_delayed_work(system_power_efficient_wq,
                           &rdev->fence_drv[ring].lockup_work,
                           RADEON_FENCE_JIFFIES_TIMEOUT);
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
                      struct radeon_fence **fence,
                      int ring)
{
        u64 seq;

        /* we are protected by the ring emission mutex */
        *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
        if ((*fence) == NULL)
                return -ENOMEM;

        (*fence)->rdev = rdev;
        (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
        (*fence)->ring = ring;
        (*fence)->is_vm_update = false;
        dma_fence_init(&(*fence)->base, &radeon_fence_ops,
                       &rdev->fence_queue.lock,
                       rdev->fence_context + ring,
                       seq);
        radeon_fence_ring_emit(rdev, ring, *fence);
        trace_radeon_fence_emit(rdev_to_drm(rdev), ring, (*fence)->seq);
        radeon_fence_schedule_check(rdev, ring);
        return 0;
}
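
/*
 * A caller typically brackets the emit with the ring lock helpers
 * (sketch under the assumption of the usual radeon_ring_lock(),
 * radeon_ring_unlock_undo() and radeon_ring_unlock_commit() helpers;
 * argument details may vary by kernel version):
 *
 *      struct radeon_ring *cp = &rdev->ring[ring];
 *
 *      r = radeon_ring_lock(rdev, cp, 64);     // reserve command words
 *      if (r)
 *              return r;
 *      r = radeon_fence_emit(rdev, &fence, ring);
 *      if (r) {
 *              radeon_ring_unlock_undo(rdev, cp);
 *              return r;
 *      }
 *      radeon_ring_unlock_commit(rdev, cp, false);
 */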

/*
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_entry_t *wait,
                                       unsigned int mode, int flags, void *key)
{
        struct radeon_fence *fence;
        u64 seq;

        fence = container_of(wait, struct radeon_fence, fence_wake);

        /*
         * We cannot use radeon_fence_process here because we're already
         * in the waitqueue, in a call from wake_up_all.
         */
        seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
        if (seq >= fence->seq) {
                dma_fence_signal_locked(&fence->base);
                radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
                __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
                dma_fence_put(&fence->base);
        }
        return 0;
}

/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
        uint64_t seq, last_seq, last_emitted;
        unsigned int count_loop = 0;
        bool wake = false;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that
         * other process needs to update last_seq between the atomic read
         * and the xchg of the current process.
         *
         * Moreover, for this to become an infinite loop, new fences need
         * to be signaled continuously, i.e. radeon_fence_read needs to
         * return a different value each time for both the currently
         * polling process and the other process that updates last_seq
         * between the atomic read and xchg of the current process. And
         * the value the other process sets as last_seq must be higher
         * than the seq value we just read, which means the current
         * process must be interrupted after radeon_fence_read and before
         * the atomic xchg.
         *
         * To be even safer, we count the number of times we loop and
         * bail after 10 iterations, accepting the fact that we might
         * have temporarily set last_seq not to the true last signaled
         * seq but to an older one.
         */
        last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
        do {
                last_emitted = rdev->fence_drv[ring].sync_seq[ring];
                seq = radeon_fence_read(rdev, ring);
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }

                if (seq <= last_seq || seq > last_emitted)
                        break;

                /* If we loop over, we don't want to return without
                 * checking if a fence is signaled, as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped over too many times; leave, with the
                         * caveat that we might have set an older fence
                         * seq than the real last seq signaled by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

        if (seq < last_emitted)
                radeon_fence_schedule_check(rdev, ring);

        return wake;
}
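
/*
 * Worked example of the 32->64 bit extension above: suppose
 * last_seq = 0x00000001fffffff0 and the 32-bit hardware fence value has
 * wrapped to 0x00000005. The raw read extends to
 * seq = 0x0000000100000005, which is less than last_seq, so the high
 * word is taken from last_emitted instead (e.g. 0x0000000200000005 when
 * last_emitted = 0x0000000200000010). The subsequent
 * (seq <= last_seq || seq > last_emitted) check filters out any value
 * that cannot correspond to a really emitted fence.
 */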

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probes
 * the hardware to see if a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
        struct radeon_fence_driver *fence_drv;
        struct radeon_device *rdev;
        int ring;

        fence_drv = container_of(work, struct radeon_fence_driver,
                                 lockup_work.work);
        rdev = fence_drv->rdev;
        ring = fence_drv - &rdev->fence_drv[0];

        if (!down_read_trylock(&rdev->exclusive_lock)) {
                /* just reschedule the check if a reset is going on */
                radeon_fence_schedule_check(rdev, ring);
                return;
        }

        if (fence_drv->delayed_irq && rdev->irq.installed) {
                unsigned long irqflags;

                fence_drv->delayed_irq = false;
                spin_lock_irqsave(&rdev->irq.lock, irqflags);
                radeon_irq_set(rdev);
                spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
        }

        if (radeon_fence_activity(rdev, ring)) {
                wake_up_all(&rdev->fence_queue);
        } else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news, we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (current fence id 0x%016llx last fence id 0x%016llx on ring %d)\n",
                         (uint64_t)atomic64_read(&fence_drv->last_seq),
                         fence_drv->sync_seq[ring], ring);

                /* remember that we need a reset */
                rdev->needs_reset = true;
                wake_up_all(&rdev->fence_queue);
        }
        up_read(&rdev->exclusive_lock);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
        if (radeon_fence_activity(rdev, ring))
                wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
                                      u64 seq, unsigned int ring)
{
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                return true;

        /* poll new last sequence at least once */
        radeon_fence_process(rdev, ring);
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                return true;

        return false;
}

static bool radeon_fence_is_signaled(struct dma_fence *f)
{
        struct radeon_fence *fence = to_radeon_fence(f);
        struct radeon_device *rdev = fence->rdev;
        unsigned int ring = fence->ring;
        u64 seq = fence->seq;

        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                return true;

        if (down_read_trylock(&rdev->exclusive_lock)) {
                radeon_fence_process(rdev, ring);
                up_read(&rdev->exclusive_lock);

                if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                        return true;
        }
        return false;
}

/**
 * radeon_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
        struct radeon_fence *fence = to_radeon_fence(f);
        struct radeon_device *rdev = fence->rdev;

        if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
                return false;

        if (down_read_trylock(&rdev->exclusive_lock)) {
                radeon_irq_kms_sw_irq_get(rdev, fence->ring);

                if (radeon_fence_activity(rdev, fence->ring))
                        wake_up_all_locked(&rdev->fence_queue);

                /* did the fence get signaled after we enabled the sw irq? */
                if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
                        radeon_irq_kms_sw_irq_put(rdev, fence->ring);
                        up_read(&rdev->exclusive_lock);
                        return false;
                }

                up_read(&rdev->exclusive_lock);
        } else {
                /* we're probably in a lockup, let's not fiddle too much */
                if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
                        rdev->fence_drv[fence->ring].delayed_irq = true;
                radeon_fence_schedule_check(rdev, fence->ring);
        }

        fence->fence_wake.flags = 0;
        fence->fence_wake.private = NULL;
        fence->fence_wake.func = radeon_fence_check_signaled;
        __add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
        dma_fence_get(f);
        return true;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
        if (!fence)
                return true;

        if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
                dma_fence_signal(&fence->base);
                return true;
        }
        return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
        unsigned int i;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
                        return true;
        }
        return false;
}

/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
                                          u64 *target_seq, bool intr,
                                          long timeout)
{
        long r;
        int i;

        if (radeon_fence_any_seq_signaled(rdev, target_seq))
                return timeout;

        /* enable IRQs and tracing */
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!target_seq[i])
                        continue;

                trace_radeon_fence_wait_begin(rdev_to_drm(rdev), i, target_seq[i]);
                radeon_irq_kms_sw_irq_get(rdev, i);
        }

        if (intr) {
                r = wait_event_interruptible_timeout(rdev->fence_queue,
                        (radeon_fence_any_seq_signaled(rdev, target_seq) ||
                         rdev->needs_reset), timeout);
        } else {
                r = wait_event_timeout(rdev->fence_queue,
                        (radeon_fence_any_seq_signaled(rdev, target_seq) ||
                         rdev->needs_reset), timeout);
        }

        if (rdev->needs_reset)
                r = -EDEADLK;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!target_seq[i])
                        continue;

                radeon_irq_kms_sw_irq_put(rdev, i);
                trace_radeon_fence_wait_end(rdev_to_drm(rdev), i, target_seq[i]);
        }

        return r;
}

/**
 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 */
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
{
        uint64_t seq[RADEON_NUM_RINGS] = {};
        long r;

        /*
         * This function should not be called on !radeon fences.
         * If this is the case, it would mean this function can
         * also be called on radeon fences belonging to another card.
         * exclusive_lock is not held in that case.
         */
        if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
                return dma_fence_wait(&fence->base, intr);

        seq[fence->ring] = fence->seq;
        r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
        if (r <= 0)
                return r;

        dma_fence_signal(&fence->base);
        return r;
}
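
/*
 * Example of a bounded wait (sketch only; msecs_to_jiffies() converts a
 * millisecond budget into the jiffies this function expects):
 *
 *      long r;
 *
 *      r = radeon_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *      if (r == 0)
 *              ...                     // timed out, fence not signaled
 *      else if (r < 0)
 *              ...                     // -ERESTARTSYS, -EDEADLK, ...
 *      else
 *              ...                     // signaled, r jiffies remained
 */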

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, an error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
        long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

        if (r > 0)
                return 0;
        else
                return r;
}

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, an error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
        uint64_t seq[RADEON_NUM_RINGS] = {};
        long r;

        seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
        if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
                /* nothing to wait for, last_seq is already
                 * the last emitted fence
                 */
                return -ENOENT;
        }

        r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;

        return 0;
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, an error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
        uint64_t seq[RADEON_NUM_RINGS] = {};
        long r;

        seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
        if (!seq[ring])
                return 0;

        r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
        if (r < 0) {
                if (r == -EDEADLK)
                        return -EDEADLK;

                dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
                        ring, r);
        }
        return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
        dma_fence_get(&fence->base);
        return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
        struct radeon_fence *tmp = *fence;

        *fence = NULL;
        if (tmp)
                dma_fence_put(&tmp->base);
}
667/**
668 * radeon_fence_count_emitted - get the count of emitted fences
669 *
670 * @rdev: radeon device pointer
671 * @ring: ring index the fence is associated with
672 *
673 * Get the number of fences emitted on the requested ring (all asics).
674 * Returns the number of emitted fences on the ring. Used by the
675 * dynpm code to ring track activity.
676 */
677unsigned int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
678{
679 uint64_t emitted;
680
681 /* We are not protected by ring lock when reading the last sequence
682 * but it's ok to report slightly wrong fence count here.
683 */
684 radeon_fence_process(rdev, ring);
685 emitted = rdev->fence_drv[ring].sync_seq[ring]
686 - atomic64_read(&rdev->fence_drv[ring].last_seq);
687 /* to avoid 32bits warp around */
688 if (emitted > 0x10000000)
689 emitted = 0x10000000;
690
691 return (unsigned int)emitted;
692}
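
/*
 * A hypothetical load check along the lines of what the dynpm code does
 * (sketch only, not taken from the power-management code itself):
 *
 *      if (radeon_fence_count_emitted(rdev, RADEON_RING_TYPE_GFX_INDEX) > 0)
 *              ...     // GPU still has work outstanding, keep clocks up
 */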

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
        struct radeon_fence_driver *fdrv;

        if (!fence)
                return false;

        if (fence->ring == dst_ring)
                return false;

        /* we are protected by the ring mutex */
        fdrv = &fence->rdev->fence_drv[dst_ring];
        if (fence->seq <= fdrv->sync_seq[fence->ring])
                return false;

        return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
        struct radeon_fence_driver *dst, *src;
        unsigned int i;

        if (!fence)
                return;

        if (fence->ring == dst_ring)
                return;

        /* we are protected by the ring mutex */
        src = &fence->rdev->fence_drv[fence->ring];
        dst = &fence->rdev->fence_drv[dst_ring];
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (i == dst_ring)
                        continue;

                dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
        }
}
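
/*
 * Together these two helpers implement inter-ring synchronization
 * (sketch, assuming ring-specific code emits the actual semaphore wait
 * when one is needed):
 *
 *      if (radeon_fence_need_sync(fence, dst_ring)) {
 *              ...     // emit a semaphore wait on dst_ring
 *              radeon_fence_note_sync(fence, dst_ring);
 *      }
 */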

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
        uint64_t index;
        int r;

        radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
        if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
                rdev->fence_drv[ring].scratch_reg = 0;
                if (ring != R600_RING_TYPE_UVD_INDEX) {
                        index = R600_WB_EVENT_OFFSET + ring * 4;
                        rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
                        rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
                                                         index;
                } else {
                        /* put fence directly behind firmware */
                        index = ALIGN(rdev->uvd_fw->size, 8);
                        rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
                        rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
                }
        } else {
                r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
                if (r) {
                        dev_err(rdev->dev, "fence failed to get scratch register\n");
                        return r;
                }
                index = RADEON_WB_SCRATCH_OFFSET +
                        rdev->fence_drv[ring].scratch_reg -
                        rdev->scratch.reg_base;
                rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
                rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
        }
        radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
        rdev->fence_drv[ring].initialized = true;
        dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016llx\n",
                 ring, rdev->fence_drv[ring].gpu_addr);
        return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
        int i;

        rdev->fence_drv[ring].scratch_reg = -1;
        rdev->fence_drv[ring].cpu_addr = NULL;
        rdev->fence_drv[ring].gpu_addr = 0;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                rdev->fence_drv[ring].sync_seq[i] = 0;
        atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
        rdev->fence_drv[ring].initialized = false;
        INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
                          radeon_fence_check_lockup);
        rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 */
void radeon_fence_driver_init(struct radeon_device *rdev)
{
        int ring;

        init_waitqueue_head(&rdev->fence_queue);
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++)
                radeon_fence_driver_init_ring(rdev, ring);

        radeon_debugfs_fence_init(rdev);
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
        int ring, r;

        mutex_lock(&rdev->ring_lock);
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                if (!rdev->fence_drv[ring].initialized)
                        continue;
                r = radeon_fence_wait_empty(rdev, ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        radeon_fence_driver_force_completion(rdev, ring);
                }
                cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
                wake_up_all(&rdev->fence_queue);
                radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
                rdev->fence_drv[ring].initialized = false;
        }
        mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
        if (rdev->fence_drv[ring].initialized) {
                radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
                cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
        }
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info_show(struct seq_file *m, void *data)
{
        struct radeon_device *rdev = m->private;
        int i, j;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!rdev->fence_drv[i].initialized)
                        continue;

                radeon_fence_process(rdev, i);

                seq_printf(m, "--- ring %d ---\n", i);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
                seq_printf(m, "Last emitted 0x%016llx\n",
                           rdev->fence_drv[i].sync_seq[i]);

                for (j = 0; j < RADEON_NUM_RINGS; ++j) {
                        if (i != j && rdev->fence_drv[j].initialized)
                                seq_printf(m, "Last sync to ring %d 0x%016llx\n",
                                           j, rdev->fence_drv[i].sync_seq[j]);
                }
        }
        return 0;
}

/*
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(void *data, u64 *val)
{
        struct radeon_device *rdev = (struct radeon_device *)data;

        down_read(&rdev->exclusive_lock);
        *val = rdev->needs_reset;
        rdev->needs_reset = true;
        wake_up_all(&rdev->fence_queue);
        up_read(&rdev->exclusive_lock);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(radeon_debugfs_gpu_reset_fops,
                         radeon_debugfs_gpu_reset, NULL, "%lld\n");
#endif

void radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;

        debugfs_create_file("radeon_gpu_reset", 0444, root, rdev,
                            &radeon_debugfs_gpu_reset_fops);
        debugfs_create_file("radeon_fence_info", 0444, root, rdev,
                            &radeon_debugfs_fence_info_fops);
#endif
}
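
/*
 * From userspace (illustrative; the DRM minor number 0 is an assumption
 * and depends on the system):
 *
 *      # cat /sys/kernel/debug/dri/0/radeon_fence_info
 *      # cat /sys/kernel/debug/dri/0/radeon_gpu_reset
 *
 * Reading radeon_gpu_reset reports the current needs_reset state and, as
 * a side effect, flags the GPU for a reset at the next fence wait.
 */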

static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
        return "radeon";
}

static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
        struct radeon_fence *fence = to_radeon_fence(f);

        switch (fence->ring) {
        case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
        case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
        case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
        case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
        case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
        case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
        case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
        case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
        default:
                WARN_ON_ONCE(1);
                return "radeon.unk";
        }
}

static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
        return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct radeon_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
};

static void
radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct radeon_wait_cb *wait =
                container_of(cb, struct radeon_wait_cb, base);

        wake_up_process(wait->task);
}

static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
                                             signed long t)
{
        struct radeon_fence *fence = to_radeon_fence(f);
        struct radeon_device *rdev = fence->rdev;
        struct radeon_wait_cb cb;

        cb.task = current;

        if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
                return t;

        while (t > 0) {
                if (intr)
                        set_current_state(TASK_INTERRUPTIBLE);
                else
                        set_current_state(TASK_UNINTERRUPTIBLE);

                /*
                 * radeon_test_signaled must be called after
                 * set_current_state to prevent a race with wake_up_process
                 */
                if (radeon_test_signaled(fence))
                        break;

                if (rdev->needs_reset) {
                        t = -EDEADLK;
                        break;
                }

                t = schedule_timeout(t);

                if (t > 0 && intr && signal_pending(current))
                        t = -ERESTARTSYS;
        }

        __set_current_state(TASK_RUNNING);
        dma_fence_remove_callback(f, &cb.base);

        return t;
}

const struct dma_fence_ops radeon_fence_ops = {
        .get_driver_name = radeon_fence_get_driver_name,
        .get_timeline_name = radeon_fence_get_timeline_name,
        .enable_signaling = radeon_fence_enable_signaling,
        .signaled = radeon_fence_is_signaled,
        .wait = radeon_fence_default_wait,
        .release = NULL,
};