/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kthread.h>
#include <trace/events/dma_fence.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
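
/*
 * Thin wrappers around the engine's irq_enable()/irq_disable() vfuncs.
 * The caller has already disabled local interrupts, so only the gt
 * irq_lock is taken here; irq_enable() reports whether the engine has
 * an interrupt to arm at all.
 */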
static bool irq_enable(struct intel_engine_cs *engine)
{
	if (!engine->irq_enable)
		return false;

	/* Caller disables interrupts */
	spin_lock(&engine->gt->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->gt->irq_lock);

	return true;
}

static void irq_disable(struct intel_engine_cs *engine)
{
	if (!engine->irq_disable)
		return;

	/* Caller disables interrupts */
	spin_lock(&engine->gt->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->gt->irq_lock);
}

static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
	/*
	 * Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference.
	 */
	if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
		return;

	/*
	 * The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	WRITE_ONCE(b->irq_armed, true);

	/* Requests may have completed before we could enable the interrupt. */
	if (!b->irq_enabled++ && irq_enable(b->irq_engine))
		irq_work_queue(&b->irq_work);
}

static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
	if (!b->irq_engine)
		return;

	spin_lock(&b->irq_lock);
	if (!b->irq_armed)
		__intel_breadcrumbs_arm_irq(b);
	spin_unlock(&b->irq_lock);
}

static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
	GEM_BUG_ON(!b->irq_enabled);
	if (!--b->irq_enabled)
		irq_disable(b->irq_engine);

	WRITE_ONCE(b->irq_armed, false);
	intel_gt_pm_put_async(b->irq_engine->gt);
}

static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
	spin_lock(&b->irq_lock);
	if (b->irq_armed)
		__intel_breadcrumbs_disarm_irq(b);
	spin_unlock(&b->irq_lock);
}
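
/*
 * b->signalers tracks every context with breadcrumbs still pending. The
 * list is walked under RCU by the irq worker, so insertion and removal
 * use the _rcu list primitives, serialised by b->signalers_lock; callers
 * must already hold the per-context ce->signal_lock.
 */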
static void add_signaling_context(struct intel_breadcrumbs *b,
				  struct intel_context *ce)
{
	lockdep_assert_held(&ce->signal_lock);

	spin_lock(&b->signalers_lock);
	list_add_rcu(&ce->signal_link, &b->signalers);
	spin_unlock(&b->signalers_lock);
}

static bool remove_signaling_context(struct intel_breadcrumbs *b,
				     struct intel_context *ce)
{
	lockdep_assert_held(&ce->signal_lock);

	if (!list_empty(&ce->signals))
		return false;

	spin_lock(&b->signalers_lock);
	list_del_rcu(&ce->signal_link);
	spin_unlock(&b->signalers_lock);

	return true;
}
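
/*
 * Debug-only sanity check (hence __maybe_unused, invoked from inside a
 * GEM_BUG_ON below): verify that ce->signals remains sorted by seqno
 * around the request's position after an insertion.
 */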
__maybe_unused static bool
check_signal_order(struct intel_context *ce, struct i915_request *rq)
{
	if (rq->context != ce)
		return false;

	if (!list_is_last(&rq->signal_link, &ce->signals) &&
	    i915_seqno_passed(rq->fence.seqno,
			      list_next_entry(rq, signal_link)->fence.seqno))
		return false;

	if (!list_is_first(&rq->signal_link, &ce->signals) &&
	    i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
			      rq->fence.seqno))
		return false;

	return true;
}
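
/*
 * The following helpers split dma_fence_signal() into its component
 * steps -- mark the fence signaled, stamp the completion time, then run
 * the attached callbacks -- so the irq worker can batch many fences
 * against a single timestamp taken outside the signal locks.
 */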
static bool
__dma_fence_signal(struct dma_fence *fence)
{
	return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
}

static void
__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
{
	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);
}

static void
__dma_fence_signal__notify(struct dma_fence *fence,
			   const struct list_head *list)
{
	struct dma_fence_cb *cur, *tmp;

	lockdep_assert_held(fence->lock);

	list_for_each_entry_safe(cur, tmp, list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}
}
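
/* Hand the context's timeline over to the retirement worker, if any. */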
static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
{
	if (b->irq_engine)
		intel_engine_add_retire(b->irq_engine, tl);
}
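
/*
 * Mark the fence signaled; returns false (dropping the reference taken
 * when the breadcrumb was attached) if someone else signaled it first.
 */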
static bool __signal_request(struct i915_request *rq)
{
	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));

	if (!__dma_fence_signal(&rq->fence)) {
		i915_request_put(rq);
		return false;
	}

	return true;
}
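
/*
 * Prepend a node to a list that is private to the irq worker; since no
 * one else can touch it, the llist_node can be chained without atomics.
 */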
static struct llist_node *
slist_add(struct llist_node *node, struct llist_node *head)
{
	node->next = head;
	return node;
}
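
/*
 * The heart of breadcrumb signaling. Runs as irq_work (hard irq
 * context): walk every signaling context, move each completed request
 * onto a local list, then signal the fences and invoke their callbacks
 * holding no lock other than each fence's own.
 */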
static void signal_irq_work(struct irq_work *work)
{
	struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
	const ktime_t timestamp = ktime_get();
	struct llist_node *signal, *sn;
	struct intel_context *ce;

	signal = NULL;
	if (unlikely(!llist_empty(&b->signaled_requests)))
		signal = llist_del_all(&b->signaled_requests);

	/*
	 * Keep the irq armed until the interrupt after all listeners are gone.
	 *
	 * Enabling/disabling the interrupt is rather costly, roughly a couple
	 * of hundred microseconds. If we are proactive and enable/disable
	 * the interrupt around every request that wants a breadcrumb, we
	 * quickly drown in the extra orders of magnitude of latency imposed
	 * on request submission.
	 *
	 * So we try to be lazy, and keep the interrupts enabled until no
	 * more listeners appear within a breadcrumb interrupt interval (that
	 * is until a request completes that no one cares about). The
	 * observation is that listeners come in batches, and will often
	 * listen to a bunch of requests in succession. Though note on icl+,
	 * interrupts are always enabled due to concerns with rc6 being
	 * dysfunctional with per-engine interrupt masking.
	 *
	 * We also try to avoid raising too many interrupts, as they may
	 * be generated by userspace batches and it is unfortunately rather
	 * too easy to drown the CPU under a flood of GPU interrupts. Thus
	 * whenever no one appears to be listening, we turn off the interrupts.
	 * Fewer interrupts should conserve power -- at the very least, fewer
	 * interrupts draw less ire from other users of the system and tools
	 * like powertop.
	 */
	if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers))
		intel_breadcrumbs_disarm_irq(b);

	rcu_read_lock();
	list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
		struct i915_request *rq;

		list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
			bool release;

			if (!__i915_request_is_complete(rq))
				break;

			if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
						&rq->fence.flags))
				break;

			/*
			 * Queue for execution after dropping the signaling
			 * spinlock as the callback chain may end up adding
			 * more signalers to the same context or engine.
			 */
			spin_lock(&ce->signal_lock);
			list_del_rcu(&rq->signal_link);
			release = remove_signaling_context(b, ce);
			spin_unlock(&ce->signal_lock);

			if (__signal_request(rq))
				/* We own signal_node now, xfer to local list */
				signal = slist_add(&rq->signal_node, signal);

			if (release) {
				add_retire(b, ce->timeline);
				intel_context_put(ce);
			}
		}
	}
	rcu_read_unlock();

	llist_for_each_safe(signal, sn, signal) {
		struct i915_request *rq =
			llist_entry(signal, typeof(*rq), signal_node);
		struct list_head cb_list;

		spin_lock(&rq->lock);
		list_replace(&rq->fence.cb_list, &cb_list);
		__dma_fence_signal__timestamp(&rq->fence, timestamp);
		__dma_fence_signal__notify(&rq->fence, &cb_list);
		spin_unlock(&rq->lock);

		i915_request_put(rq);
	}

	if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
		intel_breadcrumbs_arm_irq(b);
}
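
/*
 * Allocate and initialise the breadcrumbs for an engine. Note that the
 * code above tolerates a NULL irq_engine, presumably for setups where
 * signaling is driven by other means than an engine interrupt.
 */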
struct intel_breadcrumbs *
intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
{
	struct intel_breadcrumbs *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	b->irq_engine = irq_engine;

	spin_lock_init(&b->signalers_lock);
	INIT_LIST_HEAD(&b->signalers);
	init_llist_head(&b->signaled_requests);

	spin_lock_init(&b->irq_lock);
	init_irq_work(&b->irq_work, signal_irq_work);

	return b;
}

void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
{
	unsigned long flags;

	if (!b->irq_engine)
		return;

	spin_lock_irqsave(&b->irq_lock, flags);

	if (b->irq_enabled)
		irq_enable(b->irq_engine);
	else
		irq_disable(b->irq_engine);

	spin_unlock_irqrestore(&b->irq_lock, flags);
}
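
/*
 * Flush signaling before the engine is parked: sync the irq worker, then
 * run it by hand (with local interrupts disabled, mimicking the hard irq
 * environment it expects) until the interrupt has been disarmed.
 */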
void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
{
	/* Kick the work once more to drain the signalers */
	irq_work_sync(&b->irq_work);
	while (unlikely(READ_ONCE(b->irq_armed))) {
		local_irq_disable();
		signal_irq_work(&b->irq_work);
		local_irq_enable();
		cond_resched();
	}
	GEM_BUG_ON(!list_empty(&b->signalers));
}

void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
{
	irq_work_sync(&b->irq_work);
	GEM_BUG_ON(!list_empty(&b->signalers));
	GEM_BUG_ON(b->irq_armed);
	kfree(b);
}
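
/*
 * Attach the request to its context's signal list, keeping that list
 * sorted by seqno. Called with ce->signal_lock held and the request
 * still active on the HW.
 */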
static void insert_breadcrumb(struct i915_request *rq)
{
	struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
	struct intel_context *ce = rq->context;
	struct list_head *pos;

	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
		return;

	i915_request_get(rq);

	/*
	 * If the request is already completed, we can transfer it
	 * straight onto a signaled list, and queue the irq worker for
	 * its signal completion.
	 */
	if (__i915_request_is_complete(rq)) {
		if (__signal_request(rq) &&
		    llist_add(&rq->signal_node, &b->signaled_requests))
			irq_work_queue(&b->irq_work);
		return;
	}

	if (list_empty(&ce->signals)) {
		intel_context_get(ce);
		add_signaling_context(b, ce);
		pos = &ce->signals;
	} else {
		/*
		 * We keep the seqno in retirement order, so we can break
		 * inside intel_engine_signal_breadcrumbs as soon as we've
		 * passed the last completed request (or seen a request that
		 * hasn't even started). We could walk the timeline->requests,
		 * but keeping a separate signalers_list has the advantage of
		 * hopefully being much smaller than the full list and so
		 * provides faster iteration and detection when there are no
		 * more interrupts required for this context.
		 *
		 * We typically expect to add new signalers in order, so we
		 * start looking for our insertion point from the tail of
		 * the list.
		 */
		list_for_each_prev(pos, &ce->signals) {
			struct i915_request *it =
				list_entry(pos, typeof(*it), signal_link);

			if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
				break;
		}
	}
	list_add_rcu(&rq->signal_link, pos);
	GEM_BUG_ON(!check_signal_order(ce, rq));
	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
	set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);

	/*
	 * Defer enabling the interrupt to after HW submission and recheck
	 * the request as it may have completed and raised the interrupt as
	 * we were attaching it into the lists.
	 */
	irq_work_queue(&b->irq_work);
}
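
/*
 * Note the unlocked ACTIVE test below followed by the locked re-check:
 * the request may be unsubmitted between the peek and acquiring
 * ce->signal_lock, so the breadcrumb is only attached if the request is
 * still active once the lock is held.
 */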
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
	struct intel_context *ce = rq->context;

	/* Serialises with i915_request_retire() using rq->lock */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		return true;

	/*
	 * Peek at i915_request_submit()/i915_request_unsubmit() status.
	 *
	 * If the request is not yet active (and not signaled), we will
	 * attach the breadcrumb later.
	 */
	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
		return true;

	spin_lock(&ce->signal_lock);
	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
		insert_breadcrumb(rq);
	spin_unlock(&ce->signal_lock);

	return true;
}
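
/*
 * Undo i915_request_enable_breadcrumb(): unhook the request from the
 * signal list and drop the request (and, if this emptied the context,
 * the context) references taken when the breadcrumb was attached.
 */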
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
	struct intel_context *ce = rq->context;
	bool release;

	if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
		return;

	spin_lock(&ce->signal_lock);
	list_del_rcu(&rq->signal_link);
	release = remove_signaling_context(rq->engine->breadcrumbs, ce);
	spin_unlock(&ce->signal_lock);
	if (release)
		intel_context_put(ce);

	i915_request_put(rq);
}

static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
{
	struct intel_context *ce;
	struct i915_request *rq;

	drm_printf(p, "Signals:\n");

	rcu_read_lock();
	list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
		list_for_each_entry_rcu(rq, &ce->signals, signal_link)
			drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
				   rq->fence.context, rq->fence.seqno,
				   i915_request_completed(rq) ? "!" :
				   i915_request_started(rq) ? "*" :
				   "",
				   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
	}
	rcu_read_unlock();
}

void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
				    struct drm_printer *p)
{
	struct intel_breadcrumbs *b;

	b = engine->breadcrumbs;
	if (!b)
		return;

	drm_printf(p, "IRQ: %s\n", enableddisabled(b->irq_armed));
	if (!list_empty(&b->signalers))
		print_signals(b, p);
}