/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"
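
/*
 * In outline: waiters for a breadcrumb (the seqno write + MI_USER_INTERRUPT
 * emitted after each request) are kept in a per-engine rbtree ordered by
 * seqno (b->waiters). The oldest waiter (b->irq_wait) acts as the
 * bottom-half that owns the user interrupt, and a dedicated signaler
 * kthread (b->signaler) delivers dma-fence signals. The fake-irq and
 * hangcheck timers below paper over missed interrupts.
 */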

static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	wait = b->irq_wait;
	if (wait) {
		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk))
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}
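
/*
 * Wake the current bottom-half waiter (if any) under the irq lock and
 * report whether a waiter existed and whether it was actually asleep
 * (ENGINE_WAKEUP_WAITER / ENGINE_WAKEUP_ASLEEP).
 */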
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->irq_lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	return result;
}

static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}
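
/*
 * Record that a breadcrumb (user interrupt) appears to have gone missing:
 * log the diagnosis and flag the engine in missed_irq_rings so that the
 * heavier fake-irq polling is used from now on (see use_fake_irq()).
 */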
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
	DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s, current seqno=%x, last=%x\n",
			 engine->name, __builtin_return_address(0),
			 yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
					&engine->irq_posted)),
			 intel_engine_get_seqno(engine),
			 intel_engine_last_submit(engine));

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
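
/*
 * Per-engine timer callback, re-armed for DRM_I915_HANGCHECK_JIFFIES after
 * each observed interrupt: if no new interrupts have arrived and a waiter
 * is still asleep, declare a missed breadcrumb and fall back to the
 * fake-irq poll.
 */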
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!b->irq_armed)
		return;

	if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
		b->hangcheck_interrupts = atomic_read(&engine->irq_count);
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g. low priority task on a loaded
	 * system) and wait until it sleeps before declaring a missed interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. Assuming all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}

static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
	 * Here the worker will wake up every jiffie in order to kick the
	 * oldest waiter to do the coherent seqno check.
	 */

	spin_lock_irq(&b->irq_lock);
	if (!__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irq(&b->irq_lock);
	if (!b->irq_armed)
		return;

	mod_timer(&b->fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation! To
	 * prevent this, we also queue the hangcheck from the retire
	 * worker.
	 */
	i915_queue_hangcheck(engine->i915);
}

static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);
}
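
/*
 * Drop the breadcrumb (user interrupt) irq for this engine. Must be called
 * with b->irq_lock held and with no bottom-half waiter still installed.
 */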
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);
	GEM_BUG_ON(b->irq_wait);

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	b->irq_armed = false;
}

void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait, *n, *first;

	if (!b->irq_armed)
		return;

	/* We only disarm the irq when we are idle (all requests completed),
	 * so if the bottom-half remains asleep, it missed the request
	 * completion.
	 */

	spin_lock_irq(&b->rb_lock);

	spin_lock(&b->irq_lock);
	first = fetch_and_zero(&b->irq_wait);
	__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock(&b->irq_lock);

	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		RB_CLEAR_NODE(&wait->node);
		if (wake_up_process(wait->tsk) && wait == first)
			missed_breadcrumb(engine);
	}
	b->waiters = RB_ROOT;

	spin_unlock_irq(&b->rb_lock);
}
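
/*
 * Decide whether to keep polling with the fake-irq timer: only when this
 * engine has previously missed an interrupt and no new interrupts have
 * been observed since the poll was started.
 */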
static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/* Only start with the heavy weight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}

static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}

static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return false;

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;
	GEM_BUG_ON(b->irq_enabled);

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt.
		 */
		return true;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked
	 * by i915->gt.awake, we can forgo holding our own wakref
	 * for the interrupt as before i915->gt.awake is released (when
	 * the driver is idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	enable_fake_irq(b);
	return true;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task. N.B. when the
	 * task wakes up, it will find the empty rb_node, discern that it
	 * has already been removed from the tree and skip the serialisation
	 * of the b->rb_lock and b->irq_lock. This means that the destruction
	 * of the intel_wait is not serialised with the interrupt handler
	 * by the waiter - it must instead be serialised by the caller.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(!b->irq_wait);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	if (next)
		wake_up_process(to_wait(next)->tsk);
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first, armed;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches since we hold the
	 * spinlock we know that the first_waiter must be delayed and can
	 * reduce some of the sequential wake up latency if we take action
	 * ourselves and wake up the completed tasks in parallel. Also, by
	 * removing stale elements in the tree, we may be able to reduce the
	 * ping-pong between the old bottom-half and ourselves as first-waiter.
	 */
	armed = false;
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (first) {
		spin_lock(&b->irq_lock);
		b->irq_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		armed = __intel_breadcrumbs_enable_irq(b);
		spin_unlock(&b->irq_lock);
	}

	if (completed) {
		/* Advance the bottom-half (b->irq_wait) before we wake up
		 * the waiters who may scribble over their intel_wait
		 * just as the interrupt handler is dereferencing it via
		 * b->irq_wait.
		 */
		if (!first) {
			struct rb_node *next = rb_next(completed);
			GEM_BUG_ON(next == &wait->node);
			__intel_breadcrumbs_next(engine, next);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

	return armed;
}
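
/*
 * intel_engine_add_wait() and intel_engine_remove_wait() bracket a wait for
 * a breadcrumb. A rough sketch of the expected calling pattern (see
 * i915_wait_request() for the real thing):
 *
 *	struct intel_wait wait = { .tsk = current, .seqno = seqno };
 *
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
 *			break;
 *		schedule();
 *	}
 *	intel_engine_remove_wait(engine, &wait);
 *	__set_current_state(TASK_RUNNING);
 */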
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool armed;

	spin_lock_irq(&b->rb_lock);
	armed = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
	if (armed)
		return armed;

	/* Make the caller recheck if its request has already started. */
	return i915_seqno_passed(intel_engine_get_seqno(engine),
				 wait->seqno - 1);
}

static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}
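
/*
 * Note that smaller task->prio values denote more important tasks. The
 * signaler thread is treated as the most important waiter of all (INT_MIN),
 * so it never spends its own time on chain wakeups when removing a wait.
 */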
static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in parallel
			 * rather than sequentially, which should reduce
			 * the overall latency in waking all the completed
			 * clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		__intel_breadcrumbs_next(engine, next);
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node)) {
		GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
		return;
	}

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
}

static bool signal_valid(const struct drm_i915_gem_request *request)
{
	return intel_wait_check_request(&request->signaling.wait, request);
}

static bool signal_complete(const struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is already completed.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return signal_valid(request);

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}

static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
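
/*
 * Main loop of the per-engine signaler kthread: wait to be woken by the
 * user interrupt (or by a client adding a new signal), then signal every
 * completed fence in retirement order, handing the bottom-half over
 * between iterations.
 */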
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		bool do_schedule = true;

		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		rcu_read_lock();
		request = rcu_dereference(b->first_signal);
		if (request)
			request = i915_gem_request_get_rcu(request);
		rcu_read_unlock();
		if (signal_complete(request)) {
			local_bh_disable();
			dma_fence_signal(&request->fence);
			local_bh_enable(); /* kick start the tasklets */

			spin_lock_irq(&b->rb_lock);

			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			__intel_engine_remove_wait(engine,
						   &request->signaling.wait);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			if (request == rcu_access_pointer(b->first_signal)) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				rcu_assign_pointer(b->first_signal,
						   rb ? to_signaler(rb) : NULL);
			}
			rb_erase(&request->signaling.node, &b->signals);
			RB_CLEAR_NODE(&request->signaling.node);

			spin_unlock_irq(&b->rb_lock);

			i915_gem_request_put(request);

			/* If the engine is saturated we may be continually
			 * processing completed requests. This angers the
			 * NMI watchdog if we never let anything else
			 * have access to the CPU. Let's pretend to be nice
			 * and relinquish the CPU if we burn through the
			 * entire RT timeslice!
			 */
			do_schedule = need_resched();
		}

		if (unlikely(do_schedule)) {
			DEFINE_WAIT(exec);

			if (kthread_should_park())
				kthread_parkme();

			if (kthread_should_stop()) {
				GEM_BUG_ON(request);
				break;
			}

			if (request)
				add_wait_queue(&request->execute, &exec);

			schedule();

			if (request)
				remove_wait_queue(&request->execute, &exec);
		}
		i915_gem_request_put(request);
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	u32 seqno;

	/* Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->rb_lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	seqno = i915_gem_request_global_seqno(request);
	if (!seqno)
		return;

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.request = request;
	request->signaling.wait.seqno = seqno;
	i915_gem_request_get(request);

	spin_lock(&b->rb_lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);

	if (!__i915_gem_request_completed(request, seqno)) {
		struct rb_node *parent, **p;
		bool first;

		/* Now insert ourselves into the retirement ordered list of
		 * signals on this engine. We track the oldest seqno as that
		 * will be the first signal to complete.
		 */
		parent = NULL;
		first = true;
		p = &b->signals.rb_node;
		while (*p) {
			parent = *p;
			if (i915_seqno_passed(seqno,
					      to_signaler(parent)->signaling.wait.seqno)) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		}
		rb_link_node(&request->signaling.node, parent, p);
		rb_insert_color(&request->signaling.node, &b->signals);
		if (first)
			rcu_assign_pointer(b->first_signal, request);
	} else {
		__intel_engine_remove_wait(engine, &request->signaling.wait);
		i915_gem_request_put(request);
		wakeup = false;
	}

	spin_unlock(&b->rb_lock);

	if (wakeup)
		wake_up_process(b->signaler);
}

void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);
	GEM_BUG_ON(!request->signaling.wait.seqno);

	spin_lock(&b->rb_lock);

	if (!RB_EMPTY_NODE(&request->signaling.node)) {
		if (request == rcu_access_pointer(b->first_signal)) {
			struct rb_node *rb =
				rb_next(&request->signaling.node);
			rcu_assign_pointer(b->first_signal,
					   rb ? to_signaler(rb) : NULL);
		}
		rb_erase(&request->signaling.node, &b->signals);
		RB_CLEAR_NODE(&request->signaling.node);
		i915_gem_request_put(request);
	}

	__intel_engine_remove_wait(engine, &request->signaling.wait);

	spin_unlock(&b->rb_lock);

	request->signaling.wait.seqno = 0;
}

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);
	setup_timer(&b->hangcheck,
		    intel_breadcrumbs_hangcheck,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
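
/*
 * Re-synchronise the breadcrumb irq state after the engine has been reset
 * (or first initialised, before it has started running): re-apply the
 * current enable state to the hardware and clear any stale
 * "interrupt posted" flag.
 */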
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->irq_lock);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	/* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
	 * GPU is active and may have already executed the MI_USER_INTERRUPT
	 * before the CPU is ready to receive. However, the engine is currently
	 * idle (we haven't started it yet), there is no possibility for a
	 * missed interrupt as we enabled the irq and so we can clear the
	 * immediate wakeup (until a real interrupt arrives for the waiter).
	 */
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	if (b->irq_armed)
		enable_fake_irq(b);

	spin_unlock_irq(&b->irq_lock);
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(rcu_access_pointer(b->first_signal));
	WARN_ON(!RB_EMPTY_ROOT(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}

bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool busy = false;

	spin_lock_irq(&b->rb_lock);

	if (b->irq_wait) {
		wake_up_process(b->irq_wait->tsk);
		busy = true;
	}

	if (rcu_access_pointer(b->first_signal)) {
		wake_up_process(b->signaler);
		busy = true;
	}

	spin_unlock_irq(&b->rb_lock);

	return busy;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif