2 * Copyright © 2008-2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
25 #include <linux/dma-fence-array.h>
26 #include <linux/irq_work.h>
27 #include <linux/prefetch.h>
28 #include <linux/sched.h>
29 #include <linux/sched/clock.h>
30 #include <linux/sched/signal.h>
32 #include "gem/i915_gem_context.h"
33 #include "gt/intel_context.h"
35 #include "i915_active.h"
37 #include "i915_globals.h"
38 #include "i915_trace.h"
42 struct list_head link;
44 struct i915_sw_fence *fence;
45 void (*hook)(struct i915_request *rq, struct dma_fence *signal);
46 struct i915_request *signal;
49 static struct i915_global_request {
50 struct i915_global base;
51 struct kmem_cache *slab_requests;
52 struct kmem_cache *slab_dependencies;
53 struct kmem_cache *slab_execute_cbs;
56 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
61 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
64 * The timeline struct (as part of the ppgtt underneath a context)
65 * may be freed when the request is no longer in use by the GPU.
66 * We could extend the life of a context to beyond that of all
67 * fences, possibly keeping the hw resource around indefinitely,
68 * or we just give them a false name. Since
69 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
70 * lie seems justifiable.
72 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
75 return to_request(fence)->gem_context->name ?: "[i915]";
78 static bool i915_fence_signaled(struct dma_fence *fence)
80 return i915_request_completed(to_request(fence));
83 static bool i915_fence_enable_signaling(struct dma_fence *fence)
85 return i915_request_enable_breadcrumb(to_request(fence));
88 static signed long i915_fence_wait(struct dma_fence *fence,
92 return i915_request_wait(to_request(fence),
93 interruptible | I915_WAIT_PRIORITY,
97 static void i915_fence_release(struct dma_fence *fence)
99 struct i915_request *rq = to_request(fence);
102 * The request is put onto an RCU freelist (i.e. the address
103 * is immediately reused), mark the fences as being freed now.
104 * Otherwise the debugobjects for the fences are only marked as
105 * freed when the slab cache itself is freed, and so we would get
106 * caught trying to reuse dead objects.
108 i915_sw_fence_fini(&rq->submit);
109 i915_sw_fence_fini(&rq->semaphore);
111 kmem_cache_free(global.slab_requests, rq);
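/*
 * The vtable below plugs i915_request into the generic dma_fence interface,
 * so that dma_fence users (sync_file, dma_resv, other drivers) can query,
 * wait upon and release an i915_request without knowing driver internals.
 */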
114 const struct dma_fence_ops i915_fence_ops = {
115 .get_driver_name = i915_fence_get_driver_name,
116 .get_timeline_name = i915_fence_get_timeline_name,
117 .enable_signaling = i915_fence_enable_signaling,
118 .signaled = i915_fence_signaled,
119 .wait = i915_fence_wait,
120 .release = i915_fence_release,
123 static void irq_execute_cb(struct irq_work *wrk)
125 struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
127 i915_sw_fence_complete(cb->fence);
128 kmem_cache_free(global.slab_execute_cbs, cb);
131 static void irq_execute_cb_hook(struct irq_work *wrk)
133 struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
135 cb->hook(container_of(cb->fence, struct i915_request, submit),
137 i915_request_put(cb->signal);
142 static void __notify_execute_cb(struct i915_request *rq)
144 struct execute_cb *cb;
146 lockdep_assert_held(&rq->lock);
148 if (list_empty(&rq->execute_cb))
151 list_for_each_entry(cb, &rq->execute_cb, link)
152 irq_work_queue(&cb->work);
155 * XXX Rollback on __i915_request_unsubmit()
157 * In the future, perhaps when we have an active time-slicing scheduler,
158 * it will be interesting to unsubmit parallel execution and remove
159 * busywaits from the GPU until their master is restarted. This is
160 * quite hairy, we have to carefully roll back the fence and do a
161 * preempt-to-idle cycle on the target engine, all the while the
162 * master execute_cb may refire.
164 INIT_LIST_HEAD(&rq->execute_cb);
168 remove_from_client(struct i915_request *request)
170 struct drm_i915_file_private *file_priv;
172 if (!READ_ONCE(request->file_priv))
176 file_priv = xchg(&request->file_priv, NULL);
178 spin_lock(&file_priv->mm.lock);
179 list_del(&request->client_link);
180 spin_unlock(&file_priv->mm.lock);
185 static void free_capture_list(struct i915_request *request)
187 struct i915_capture_list *capture;
189 capture = request->capture_list;
191 struct i915_capture_list *next = capture->next;
198 static bool i915_request_retire(struct i915_request *rq)
200 struct i915_active_request *active, *next;
202 lockdep_assert_held(&rq->timeline->mutex);
203 if (!i915_request_completed(rq))
206 GEM_TRACE("%s fence %llx:%lld, current %d\n",
208 rq->fence.context, rq->fence.seqno,
211 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
212 trace_i915_request_retire(rq);
215 * We know the GPU must have read the request to have
216 * sent us the seqno + interrupt, so use the position
217 * of the tail of the request to update the last known position
218 * of the GPU head.
220 * Note this requires that we are always called in request
221 * completion order.
223 GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
224 rq->ring->head = rq->postfix;
227 * Walk through the active list, calling retire on each. This allows
228 * objects to track their GPU activity and mark themselves as idle
229 * when their *last* active request is completed (updating state
230 * tracking lists for eviction, active references for GEM, etc).
232 * As the ->retire() may free the node, we decouple it first and
233 * pass along the auxiliary information (to avoid dereferencing
234 * the node after the callback).
236 list_for_each_entry_safe(active, next, &rq->active_list, link) {
238 * In microbenchmarks, or when focusing upon time inside the kernel,
239 * we may spend an inordinate amount of time simply handling
240 * the retirement of requests and processing their callbacks.
241 * Of which, this loop itself is particularly hot due to the
242 * cache misses when jumping around the list of
243 * i915_active_request. So we try to keep this loop as
244 * streamlined as possible and also prefetch the next
245 * i915_active_request to try and hide the likely cache miss.
249 INIT_LIST_HEAD(&active->link);
250 RCU_INIT_POINTER(active->request, NULL);
252 active->retire(active, rq);
258 * We only loosely track inflight requests across preemption,
259 * and so we may find ourselves attempting to retire a _completed_
260 * request that we have removed from the HW and put back on a run
261 * queue.
263 spin_lock(&rq->engine->active.lock);
264 list_del(&rq->sched.link);
265 spin_unlock(&rq->engine->active.lock);
267 spin_lock(&rq->lock);
268 i915_request_mark_complete(rq);
269 if (!i915_request_signaled(rq))
270 dma_fence_signal_locked(&rq->fence);
271 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
272 i915_request_cancel_breadcrumb(rq);
273 if (i915_request_has_waitboost(rq)) {
274 GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
275 atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
277 if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
278 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
279 __notify_execute_cb(rq);
281 GEM_BUG_ON(!list_empty(&rq->execute_cb));
282 spin_unlock(&rq->lock);
286 remove_from_client(rq);
289 intel_context_exit(rq->hw_context);
290 intel_context_unpin(rq->hw_context);
292 free_capture_list(rq);
293 i915_sched_node_fini(&rq->sched);
294 i915_request_put(rq);
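/*
 * Retire all completed requests on @rq's timeline, oldest first, up to and
 * including @rq itself. The caller must hold the timeline mutex and @rq must
 * already be completed.
 */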
299 void i915_request_retire_upto(struct i915_request *rq)
301 struct intel_timeline * const tl = rq->timeline;
302 struct i915_request *tmp;
304 GEM_TRACE("%s fence %llx:%lld, current %d\n",
306 rq->fence.context, rq->fence.seqno,
309 lockdep_assert_held(&tl->mutex);
310 GEM_BUG_ON(!i915_request_completed(rq));
313 tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
314 } while (i915_request_retire(tmp) && tmp != rq);
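/*
 * Arrange for @rq to be held back until @signal has actually started
 * executing on the GPU. If @signal is already active, the optional @hook is
 * run immediately; otherwise an execute_cb is allocated, linked onto
 * @signal->execute_cb and completed from irq_work once the signaler is
 * submitted to hardware.
 */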
318 __i915_request_await_execution(struct i915_request *rq,
319 struct i915_request *signal,
320 void (*hook)(struct i915_request *rq,
321 struct dma_fence *signal),
324 struct execute_cb *cb;
326 if (i915_request_is_active(signal)) {
328 hook(rq, &signal->fence);
332 cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
336 cb->fence = &rq->submit;
337 i915_sw_fence_await(cb->fence);
338 init_irq_work(&cb->work, irq_execute_cb);
342 cb->signal = i915_request_get(signal);
343 cb->work.func = irq_execute_cb_hook;
346 spin_lock_irq(&signal->lock);
347 if (i915_request_is_active(signal)) {
349 hook(rq, &signal->fence);
350 i915_request_put(signal);
352 i915_sw_fence_complete(cb->fence);
353 kmem_cache_free(global.slab_execute_cbs, cb);
355 list_add_tail(&cb->link, &signal->execute_cb);
357 spin_unlock_irq(&signal->lock);
362 void __i915_request_submit(struct i915_request *request)
364 struct intel_engine_cs *engine = request->engine;
366 GEM_TRACE("%s fence %llx:%lld, current %d\n",
368 request->fence.context, request->fence.seqno,
369 hwsp_seqno(request));
371 GEM_BUG_ON(!irqs_disabled());
372 lockdep_assert_held(&engine->active.lock);
374 if (i915_gem_context_is_banned(request->gem_context))
375 i915_request_skip(request, -EIO);
378 * Are we using semaphores when the gpu is already saturated?
380 * Using semaphores incurs a cost in having the GPU poll a
381 * memory location, busywaiting for it to change. The continual
382 * memory reads can have a noticeable impact on the rest of the
383 * system with the extra bus traffic, stalling the cpu as it too
384 * tries to access memory across the bus (perf stat -e bus-cycles).
386 * If we installed a semaphore on this request and we only submit
387 * the request after the signaler completed, that indicates the
388 * system is overloaded and using semaphores at this time only
389 * increases the amount of work we are doing. If so, we disable
390 * further use of semaphores until we are idle again, after which we
391 * optimistically try again.
393 if (request->sched.semaphores &&
394 i915_sw_fence_signaled(&request->semaphore))
395 engine->saturated |= request->sched.semaphores;
397 /* We may be recursing from the signal callback of another i915 fence */
398 spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
400 list_move_tail(&request->sched.link, &engine->active.requests);
402 GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
403 set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
405 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
406 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
407 !i915_request_enable_breadcrumb(request))
408 intel_engine_queue_breadcrumbs(engine);
410 __notify_execute_cb(request);
412 spin_unlock(&request->lock);
414 engine->emit_fini_breadcrumb(request,
415 request->ring->vaddr + request->postfix);
419 trace_i915_request_execute(request);
422 void i915_request_submit(struct i915_request *request)
424 struct intel_engine_cs *engine = request->engine;
427 /* Will be called from irq-context when using foreign fences. */
428 spin_lock_irqsave(&engine->active.lock, flags);
430 __i915_request_submit(request);
432 spin_unlock_irqrestore(&engine->active.lock, flags);
435 void __i915_request_unsubmit(struct i915_request *request)
437 struct intel_engine_cs *engine = request->engine;
439 GEM_TRACE("%s fence %llx:%lld, current %d\n",
441 request->fence.context, request->fence.seqno,
442 hwsp_seqno(request));
444 GEM_BUG_ON(!irqs_disabled());
445 lockdep_assert_held(&engine->active.lock);
448 * Only unwind in reverse order, required so that the per-context list
449 * is kept in seqno/ring order.
452 /* We may be recursing from the signal callback of another i915 fence */
453 spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
455 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
456 i915_request_cancel_breadcrumb(request);
458 GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
459 clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
461 spin_unlock(&request->lock);
463 /* We've already spun, don't charge on resubmitting. */
464 if (request->sched.semaphores && i915_request_started(request)) {
465 request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
466 request->sched.semaphores = 0;
470 * We don't need to wake_up any waiters on request->execute, they
471 * will get woken by any other event or us re-adding this request
472 * to the engine timeline (__i915_request_submit()). The waiters
473 * should be quite adept at finding that the request now has a new
474 * global_seqno compared to the one they went to sleep on.
478 void i915_request_unsubmit(struct i915_request *request)
480 struct intel_engine_cs *engine = request->engine;
483 /* Will be called from irq-context when using foreign fences. */
484 spin_lock_irqsave(&engine->active.lock, flags);
486 __i915_request_unsubmit(request);
488 spin_unlock_irqrestore(&engine->active.lock, flags);
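/*
 * submit_notify() is the i915_sw_fence callback for rq->submit: once all of
 * the request's dependencies have signalled (FENCE_COMPLETE) the request is
 * handed to the engine backend via submit_request(), and when the fence is
 * finally freed (FENCE_FREE) its reference on the request is dropped.
 */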
491 static int __i915_sw_fence_call
492 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
494 struct i915_request *request =
495 container_of(fence, typeof(*request), submit);
499 trace_i915_request_submit(request);
501 if (unlikely(fence->error))
502 i915_request_skip(request, fence->error);
505 * We need to serialize use of the submit_request() callback
506 * with its hotplugging performed during an emergency
507 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
508 * critical section in order to force i915_gem_set_wedged() to
509 * wait until the submit_request() is completed before
510 * proceeding.
513 request->engine->submit_request(request);
518 i915_request_put(request);
525 static int __i915_sw_fence_call
526 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
528 struct i915_request *request =
529 container_of(fence, typeof(*request), semaphore);
533 i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
537 i915_request_put(request);
544 static void retire_requests(struct intel_timeline *tl)
546 struct i915_request *rq, *rn;
548 list_for_each_entry_safe(rq, rn, &tl->requests, link)
549 if (!i915_request_retire(rq))
553 static noinline struct i915_request *
554 request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
556 struct i915_request *rq;
558 if (list_empty(&tl->requests))
561 if (!gfpflags_allow_blocking(gfp))
564 /* Move our oldest request to the slab-cache (if not in use!) */
565 rq = list_first_entry(&tl->requests, typeof(*rq), link);
566 i915_request_retire(rq);
568 rq = kmem_cache_alloc(global.slab_requests,
569 gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
573 /* Ratelimit ourselves to prevent oom from malicious clients */
574 rq = list_last_entry(&tl->requests, typeof(*rq), link);
575 cond_synchronize_rcu(rq->rcustate);
577 /* Retire our old requests in the hope that we free some */
581 return kmem_cache_alloc(global.slab_requests, gfp);
584 struct i915_request *
585 __i915_request_create(struct intel_context *ce, gfp_t gfp)
587 struct intel_timeline *tl = ce->timeline;
588 struct i915_request *rq;
592 might_sleep_if(gfpflags_allow_blocking(gfp));
594 /* Check that the caller provided an already pinned context */
595 __intel_context_pin(ce);
598 * Beware: Dragons be flying overhead.
600 * We use RCU to look up requests in flight. The lookups may
601 * race with the request being allocated from the slab freelist.
602 * That is the request we are writing to here, may be in the process
603 * of being read by __i915_active_request_get_rcu(). As such,
604 * we have to be very careful when overwriting the contents. During
605 * the RCU lookup, we chase the request->engine pointer,
606 * read the request->global_seqno and increment the reference count.
608 * The reference count is incremented atomically. If it is zero,
609 * the lookup knows the request is unallocated and complete. Otherwise,
610 * it is either still in use, or has been reallocated and reset
611 * with dma_fence_init(). This increment is safe for release as we
612 * check that the request we have a reference to matches the active
613 * request.
615 * Before we increment the refcount, we chase the request->engine
616 * pointer. We must not call kmem_cache_zalloc() or else we set
617 * that pointer to NULL and cause a crash during the lookup. If
618 * we see the request is completed (based on the value of the
619 * old engine and seqno), the lookup is complete and reports NULL.
620 * If we decide the request is not completed (new engine or seqno),
621 * then we grab a reference and double check that it is still the
622 * active request - which it won't be - and restart the lookup.
624 * Do not use kmem_cache_zalloc() here!
626 rq = kmem_cache_alloc(global.slab_requests,
627 gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
629 rq = request_alloc_slow(tl, gfp);
636 ret = intel_timeline_get_seqno(tl, rq, &seqno);
640 rq->i915 = ce->engine->i915;
642 rq->gem_context = ce->gem_context;
643 rq->engine = ce->engine;
646 rq->hwsp_seqno = tl->hwsp_seqno;
647 rq->hwsp_cacheline = tl->hwsp_cacheline;
648 rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
650 spin_lock_init(&rq->lock);
651 dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
652 tl->fence_context, seqno);
654 /* We bump the ref for the fence chain */
655 i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
656 i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
658 i915_sched_node_init(&rq->sched);
660 /* No zalloc, must clear what we need by hand */
661 rq->file_priv = NULL;
663 rq->capture_list = NULL;
665 rq->execution_mask = ALL_ENGINES;
667 INIT_LIST_HEAD(&rq->active_list);
668 INIT_LIST_HEAD(&rq->execute_cb);
671 * Reserve space in the ring buffer for all the commands required to
672 * eventually emit this request. This is to guarantee that the
673 * i915_request_add() call can't fail. Note that the reserve may need
674 * to be redone if the request is not actually submitted straight
675 * away, e.g. because a GPU scheduler has deferred it.
677 * Note that due to how we add reserved_space to intel_ring_begin()
678 * we need to double our reservation to ensure that if we need to wrap
679 * around inside i915_request_add() there is sufficient space at
680 * the beginning of the ring as well.
683 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
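/*
 * For example (illustrative numbers only): an engine whose fini breadcrumb
 * takes 6 dwords reserves 2 * 6 * sizeof(u32) = 48 bytes, enough to emit the
 * breadcrumb twice should the ring wrap.
 */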
686 * Record the position of the start of the request so that
687 * should we detect the updated seqno part-way through the
688 * GPU processing the request, we never over-estimate the
689 * position of the head.
691 rq->head = rq->ring->emit;
693 ret = rq->engine->request_alloc(rq);
697 rq->infix = rq->ring->emit; /* end of header; start of user payload */
699 intel_context_mark_active(ce);
703 ce->ring->emit = rq->head;
705 /* Make sure we didn't add ourselves to external state before freeing */
706 GEM_BUG_ON(!list_empty(&rq->active_list));
707 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
708 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
711 kmem_cache_free(global.slab_requests, rq);
713 intel_context_unpin(ce);
717 struct i915_request *
718 i915_request_create(struct intel_context *ce)
720 struct i915_request *rq;
721 struct intel_timeline *tl;
723 tl = intel_context_timeline_lock(ce);
727 /* Move our oldest request to the slab-cache (if not in use!) */
728 rq = list_first_entry(&tl->requests, typeof(*rq), link);
729 if (!list_is_last(&rq->link, &tl->requests))
730 i915_request_retire(rq);
732 intel_context_enter(ce);
733 rq = __i915_request_create(ce, GFP_KERNEL);
734 intel_context_exit(ce); /* active reference transferred to request */
738 /* Check that we do not interrupt ourselves with a new request */
739 rq->cookie = lockdep_pin_lock(&tl->mutex);
744 intel_context_timeline_unlock(tl);
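/*
 * Typical usage (sketch only, error handling elided): the caller builds a
 * request on a pinned context, emits its commands into the ring, then
 * commits it.
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	cs = intel_ring_begin(rq, 4);
 *	... emit commands ...
 *	intel_ring_advance(rq, cs);
 *
 *	i915_request_add(rq);
 */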
749 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
751 if (list_is_first(&signal->link, &signal->timeline->requests))
754 signal = list_prev_entry(signal, link);
755 if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
758 return i915_sw_fence_await_dma_fence(&rq->submit,
763 static intel_engine_mask_t
764 already_busywaiting(struct i915_request *rq)
767 * Polling a semaphore causes bus traffic, delaying other users of
768 * both the GPU and CPU. We want to limit the impact on others,
769 * while taking advantage of early submission to reduce GPU
770 * latency. Therefore we restrict ourselves to not using more
771 * than one semaphore from each source, and not using a semaphore
772 * if we have detected the engine is saturated (i.e. would not be
773 * submitted early and cause bus traffic reading an already passed
774 * seqno).
776 * See the are-we-too-late? check in __i915_request_submit().
778 return rq->sched.semaphores | rq->engine->saturated;
782 emit_semaphore_wait(struct i915_request *to,
783 struct i915_request *from,
790 GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
791 GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
793 /* Just emit the first semaphore we see as request space is limited. */
794 if (already_busywaiting(to) & from->engine->mask)
795 return i915_sw_fence_await_dma_fence(&to->submit,
799 err = i915_request_await_start(to, from);
803 /* Only submit our spinner after the signaler is running! */
804 err = __i915_request_await_execution(to, from, NULL, gfp);
808 /* We need to pin the signaler's HWSP until we are finished reading. */
809 err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
813 cs = intel_ring_begin(to, 4);
818 * Using greater-than-or-equal here means we have to worry
819 * about seqno wraparound. To sidestep that issue, we swap
820 * the timeline HWSP upon wrapping, so that everyone listening
821 * for the old (pre-wrap) values does not see the much smaller
822 * (post-wrap) values than they were expecting (and so wait
823 * forever).
825 *cs++ = MI_SEMAPHORE_WAIT |
826 MI_SEMAPHORE_GLOBAL_GTT |
828 MI_SEMAPHORE_SAD_GTE_SDD;
829 *cs++ = from->fence.seqno;
833 intel_ring_advance(to, cs);
834 to->sched.semaphores |= from->engine->mask;
835 to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
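/*
 * Order @to after @from using the cheapest mechanism available: requests on
 * the same engine are ordered by a submit fence, requests on different
 * engines may busywait on a GPU semaphore (if the engine supports them and
 * the context priority allows it), and otherwise we fall back to waiting on
 * the signaler's dma_fence before submitting @to.
 */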
840 i915_request_await_request(struct i915_request *to, struct i915_request *from)
844 GEM_BUG_ON(to == from);
845 GEM_BUG_ON(to->timeline == from->timeline);
847 if (i915_request_completed(from))
850 if (to->engine->schedule) {
851 ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
856 if (to->engine == from->engine) {
857 ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
860 } else if (intel_engine_has_semaphores(to->engine) &&
861 to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
862 ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
864 ret = i915_sw_fence_await_dma_fence(&to->submit,
871 if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
872 ret = i915_sw_fence_await_dma_fence(&to->semaphore,
883 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
885 struct dma_fence **child = &fence;
886 unsigned int nchild = 1;
890 * Note that if the fence-array was created in signal-on-any mode,
891 * we should *not* decompose it into its individual fences. However,
892 * we don't currently store which mode the fence-array is operating
893 * in. Fortunately, the only user of signal-on-any is private to
894 * amdgpu and we should not see any incoming fence-array from
895 * sync-file being in signal-on-any mode.
897 if (dma_fence_is_array(fence)) {
898 struct dma_fence_array *array = to_dma_fence_array(fence);
900 child = array->fences;
901 nchild = array->num_fences;
907 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
911 * Requests on the same timeline are explicitly ordered, along
912 * with their dependencies, by i915_request_add() which ensures
913 * that requests are submitted in-order through each ring.
915 if (fence->context == rq->fence.context)
918 /* Squash repeated waits to the same timelines */
919 if (fence->context &&
920 intel_timeline_sync_is_later(rq->timeline, fence))
923 if (dma_fence_is_i915(fence))
924 ret = i915_request_await_request(rq, to_request(fence));
926 ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
932 /* Record the latest fence used against each timeline */
934 intel_timeline_sync_set(rq->timeline, fence);
941 i915_request_await_execution(struct i915_request *rq,
942 struct dma_fence *fence,
943 void (*hook)(struct i915_request *rq,
944 struct dma_fence *signal))
946 struct dma_fence **child = &fence;
947 unsigned int nchild = 1;
950 if (dma_fence_is_array(fence)) {
951 struct dma_fence_array *array = to_dma_fence_array(fence);
953 /* XXX Error for signal-on-any fence arrays */
955 child = array->fences;
956 nchild = array->num_fences;
962 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
966 * We don't squash repeated fence dependencies here as we
967 * want to run our callback in all cases.
970 if (dma_fence_is_i915(fence))
971 ret = __i915_request_await_execution(rq,
976 ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
987 * i915_request_await_object - set this request to (async) wait upon a bo
988 * @to: request we are wishing to use
989 * @obj: object which may be in use on another ring.
990 * @write: whether the wait is on behalf of a writer
992 * This code is meant to abstract object synchronization with the GPU.
993 * Conceptually we serialise writes between engines inside the GPU.
994 * We only allow one engine to write into a buffer at any time, but
995 * multiple readers. To ensure each has a coherent view of memory, we must:
997 * - If there is an outstanding write request to the object, the new
998 * request must wait for it to complete (either CPU or in hw, requests
999 * on the same ring will be naturally ordered).
1001 * - If we are a write request (pending_write_domain is set), the new
1002 * request must wait for outstanding read requests to complete.
1004 * Returns 0 if successful, else propagates up the lower layer error.
1007 i915_request_await_object(struct i915_request *to,
1008 struct drm_i915_gem_object *obj,
1011 struct dma_fence *excl;
1015 struct dma_fence **shared;
1016 unsigned int count, i;
1018 ret = dma_resv_get_fences_rcu(obj->base.resv,
1019 &excl, &count, &shared);
1023 for (i = 0; i < count; i++) {
1024 ret = i915_request_await_dma_fence(to, shared[i]);
1028 dma_fence_put(shared[i]);
1031 for (; i < count; i++)
1032 dma_fence_put(shared[i]);
1035 excl = dma_resv_get_excl_rcu(obj->base.resv);
1040 ret = i915_request_await_dma_fence(to, excl);
1042 dma_fence_put(excl);
1048 void i915_request_skip(struct i915_request *rq, int error)
1050 void *vaddr = rq->ring->vaddr;
1053 GEM_BUG_ON(!IS_ERR_VALUE((long)error));
1054 dma_fence_set_error(&rq->fence, error);
1056 if (rq->infix == rq->postfix)
1060 * As this request likely depends on state from the lost
1061 * context, clear out all the user operations leaving the
1062 * breadcrumb at the end (so we get the fence notifications).
1065 if (rq->postfix < head) {
1066 memset(vaddr + head, 0, rq->ring->size - head);
1069 memset(vaddr + head, 0, rq->postfix - head);
1070 rq->infix = rq->postfix;
1073 static struct i915_request *
1074 __i915_request_add_to_timeline(struct i915_request *rq)
1076 struct intel_timeline *timeline = rq->timeline;
1077 struct i915_request *prev;
1080 * Dependency tracking and request ordering along the timeline
1081 * is special cased so that we can eliminate redundant ordering
1082 * operations while building the request (we know that the timeline
1083 * itself is ordered, and here we guarantee it).
1085 * As we know we will need to emit tracking along the timeline,
1086 * we embed the hooks into our request struct -- at the cost of
1087 * having to have specialised no-allocation interfaces (which will
1088 * be beneficial elsewhere).
1090 * A second benefit to open-coding i915_request_await_request is
1091 * that we can apply a slight variant of the rules specialised
1092 * for timelines that jump between engines (such as virtual engines).
1093 * If we consider the case of a virtual engine, we must emit a dma-fence
1094 * to prevent scheduling of the second request until the first is
1095 * complete (to maximise our greedy late load balancing) and this
1096 * precludes optimising to use semaphore serialisation of a single
1097 * timeline across engines.
1099 prev = rcu_dereference_protected(timeline->last_request.request,
1100 lockdep_is_held(&timeline->mutex));
1101 if (prev && !i915_request_completed(prev)) {
1102 if (is_power_of_2(prev->engine->mask | rq->engine->mask))
1103 i915_sw_fence_await_sw_fence(&rq->submit,
1107 __i915_sw_fence_await_dma_fence(&rq->submit,
1110 if (rq->engine->schedule)
1111 __i915_sched_node_add_dependency(&rq->sched,
1117 list_add_tail(&rq->link, &timeline->requests);
1120 * Make sure that no request gazumped us - if it was allocated after
1121 * our i915_request_alloc() and called __i915_request_add() before
1122 * us, the timeline will hold its seqno which is later than ours.
1124 GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1125 __i915_active_request_set(&timeline->last_request, rq);
1131 * NB: This function is not allowed to fail. Doing so would mean the
1132 * request is not being tracked for completion but the work itself is
1133 * going to happen on the hardware. This would be a Bad Thing(tm).
1135 struct i915_request *__i915_request_commit(struct i915_request *rq)
1137 struct intel_engine_cs *engine = rq->engine;
1138 struct intel_ring *ring = rq->ring;
1141 GEM_TRACE("%s fence %llx:%lld\n",
1142 engine->name, rq->fence.context, rq->fence.seqno);
1145 * To ensure that this call will not fail, space for its emissions
1146 * should already have been reserved in the ring buffer. Let the ring
1147 * know that it is time to use that space up.
1149 GEM_BUG_ON(rq->reserved_space > ring->space);
1150 rq->reserved_space = 0;
1151 rq->emitted_jiffies = jiffies;
1154 * Record the position of the start of the breadcrumb so that
1155 * should we detect the updated seqno part-way through the
1156 * GPU processing the request, we never over-estimate the
1157 * position of the ring's HEAD.
1159 cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1160 GEM_BUG_ON(IS_ERR(cs));
1161 rq->postfix = intel_ring_offset(rq, cs);
1163 return __i915_request_add_to_timeline(rq);
1166 void __i915_request_queue(struct i915_request *rq,
1167 const struct i915_sched_attr *attr)
1170 * Let the backend know a new request has arrived that may need
1171 * to adjust the existing execution schedule due to a high priority
1172 * request - i.e. we may want to preempt the current request in order
1173 * to run a high priority dependency chain *before* we can execute this
1174 * request.
1176 * This is called before the request is ready to run so that we can
1177 * decide whether to preempt the entire chain so that it is ready to
1178 * run at the earliest possible convenience.
1180 i915_sw_fence_commit(&rq->semaphore);
1181 if (attr && rq->engine->schedule)
1182 rq->engine->schedule(rq, attr);
1183 i915_sw_fence_commit(&rq->submit);
1186 void i915_request_add(struct i915_request *rq)
1188 struct i915_sched_attr attr = rq->gem_context->sched;
1189 struct intel_timeline * const tl = rq->timeline;
1190 struct i915_request *prev;
1192 lockdep_assert_held(&tl->mutex);
1193 lockdep_unpin_lock(&tl->mutex, rq->cookie);
1195 trace_i915_request_add(rq);
1197 prev = __i915_request_commit(rq);
1200 * Boost actual workloads past semaphores!
1202 * With semaphores we spin on one engine waiting for another,
1203 * simply to reduce the latency of starting our work when
1204 * the signaler completes. However, if there is any other
1205 * work that we could be doing on this engine instead, that
1206 * is better utilisation and will reduce the overall duration
1207 * of the current work. To avoid PI boosting a semaphore
1208 * far in the distance past over useful work, we keep a history
1209 * of any semaphore use along our dependency chain.
1211 if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
1212 attr.priority |= I915_PRIORITY_NOSEMAPHORE;
1215 * Boost priorities to new clients (new request flows).
1217 * Allow interactive/synchronous clients to jump ahead of
1218 * the bulk clients. (FQ_CODEL)
1220 if (list_empty(&rq->sched.signalers_list))
1221 attr.priority |= I915_PRIORITY_WAIT;
1224 __i915_request_queue(rq, &attr);
1225 local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
1228 * In typical scenarios, we do not expect the previous request on
1229 * the timeline to be still tracked by timeline->last_request if it
1230 * has been completed. If the completed request is still here, that
1231 * implies that request retirement is a long way behind submission,
1232 * suggesting that we haven't been retiring frequently enough from
1233 * the combination of retire-before-alloc, waiters and the background
1234 * retirement worker. So if the last request on this timeline was
1235 * already completed, do a catch up pass, flushing the retirement queue
1236 * up to this client. Since we have now moved the heaviest operations
1237 * during retirement onto secondary workers, such as freeing objects
1238 * or contexts, retiring a bunch of requests is mostly list management
1239 * (and cache misses), and so we should not be overly penalizing this
1240 * client by performing excess work, though we may still be performing
1241 * work on behalf of others -- but instead we should benefit from
1242 * improved resource management. (Well, that's the theory at least.)
1244 if (prev && i915_request_completed(prev) && prev->timeline == tl)
1245 i915_request_retire_upto(prev);
1247 mutex_unlock(&tl->mutex);
1250 static unsigned long local_clock_us(unsigned int *cpu)
1255 * Cheaply and approximately convert from nanoseconds to microseconds.
1256 * The result and subsequent calculations are also defined in the same
1257 * approximate microsecond units. The principal source of timing
1258 * error here is from the simple truncation.
1260 * Note that local_clock() is only defined wrt to the current CPU;
1261 * the comparisons are no longer valid if we switch CPUs. Instead of
1262 * blocking preemption for the entire busywait, we can detect the CPU
1263 * switch and use that as indicator of system load and a reason to
1264 * stop busywaiting, see busywait_stop().
1267 t = local_clock() >> 10;
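/* ns -> us by shifting (divide by 1024), a ~2% error we can tolerate here */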
1273 static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1275 unsigned int this_cpu;
1277 if (time_after(local_clock_us(&this_cpu), timeout))
1280 return this_cpu != cpu;
1283 static bool __i915_spin_request(const struct i915_request * const rq,
1284 int state, unsigned long timeout_us)
1289 * Only wait for the request if we know it is likely to complete.
1291 * We don't track the timestamps around requests, nor the average
1292 * request length, so we do not have a good indicator that this
1293 * request will complete within the timeout. What we do know is the
1294 * order in which requests are executed by the context and so we can
1295 * tell if the request has been started. If the request is not even
1296 * running yet, it is a fair assumption that it will not complete
1297 * within our relatively short timeout.
1299 if (!i915_request_is_running(rq))
1303 * When waiting for high frequency requests, e.g. during synchronous
1304 * rendering split between the CPU and GPU, the finite amount of time
1305 * required to set up the irq and wait upon it limits the response
1306 * rate. By busywaiting on the request completion for a short while we
1307 * can service the high frequency waits as quickly as possible. However,
1308 * if it is a slow request, we want to sleep as quickly as possible.
1309 * The tradeoff between waiting and sleeping is roughly the time it
1310 * takes to sleep on a request, on the order of a microsecond.
1313 timeout_us += local_clock_us(&cpu);
1315 if (i915_request_completed(rq))
1318 if (signal_pending_state(state, current))
1321 if (busywait_stop(timeout_us, cpu))
1325 } while (!need_resched());
1330 struct request_wait {
1331 struct dma_fence_cb cb;
1332 struct task_struct *tsk;
1335 static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
1337 struct request_wait *wait = container_of(cb, typeof(*wait), cb);
1339 wake_up_process(wait->tsk);
1343 * i915_request_wait - wait until execution of request has finished
1344 * @rq: the request to wait upon
1345 * @flags: how to wait
1346 * @timeout: how long to wait in jiffies
1348 * i915_request_wait() waits for the request to be completed, for a
1349 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1350 * unbounded wait).
1352 * Returns the remaining time (in jiffies) if the request completed, which may
1353 * be zero or -ETIME if the request is unfinished after the timeout expires.
1354 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1355 * pending before the request completes.
1357 long i915_request_wait(struct i915_request *rq,
1361 const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1362 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1363 struct request_wait wait;
1366 GEM_BUG_ON(timeout < 0);
1368 if (dma_fence_is_signaled(&rq->fence))
1374 trace_i915_request_wait_begin(rq, flags);
1377 * We must never wait on the GPU while holding a lock as we
1378 * may need to perform a GPU reset. So while we don't need to
1379 * serialise wait/reset with an explicit lock, we do want
1380 * lockdep to detect potential dependency cycles.
1382 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
1385 * Optimistic spin before touching IRQs.
1387 * We may use a rather large value here to offset the penalty of
1388 * switching away from the active task. Frequently, the client will
1389 * wait upon an old swapbuffer to throttle itself to remain within a
1390 * frame of the gpu. If the client is running in lockstep with the gpu,
1391 * then it should not be waiting long at all, and a sleep now will incur
1392 * extra scheduler latency in producing the next frame. To try to
1393 * avoid adding the cost of enabling/disabling the interrupt to the
1394 * short wait, we first spin to see if the request would have completed
1395 * in the time taken to setup the interrupt.
1397 * We need up to 5us to enable the irq, and up to 20us to hide the
1398 * scheduler latency of a context switch, ignoring the secondary
1399 * impacts from a context switch such as cache eviction.
1401 * The scheme used for low-latency IO is called "hybrid interrupt
1402 * polling". The suggestion there is to sleep until just before you
1403 * expect to be woken by the device interrupt and then poll for its
1404 * completion. That requires having a good predictor for the request
1405 * duration, which we currently lack.
1407 if (CONFIG_DRM_I915_SPIN_REQUEST &&
1408 __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
1409 dma_fence_signal(&rq->fence);
1414 * This client is about to stall waiting for the GPU. In many cases
1415 * this is undesirable and limits the throughput of the system, as
1416 * many clients cannot continue processing user input/output whilst
1417 * blocked. RPS autotuning may take tens of milliseconds to respond
1418 * to the GPU load and thus incurs additional latency for the client.
1419 * We can circumvent that by promoting the GPU frequency to maximum
1420 * before we sleep. This makes the GPU throttle up much more quickly
1421 * (good for benchmarks and user experience, e.g. window animations),
1422 * but at a cost of spending more power processing the workload
1423 * (bad for battery).
1425 if (flags & I915_WAIT_PRIORITY) {
1426 if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
1428 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
1432 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1436 set_current_state(state);
1438 if (i915_request_completed(rq)) {
1439 dma_fence_signal(&rq->fence);
1443 if (signal_pending_state(state, current)) {
1444 timeout = -ERESTARTSYS;
1453 timeout = io_schedule_timeout(timeout);
1455 __set_current_state(TASK_RUNNING);
1457 dma_fence_remove_callback(&rq->fence, &wait.cb);
1460 mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_);
1461 trace_i915_request_wait_end(rq);
1465 bool i915_retire_requests(struct drm_i915_private *i915)
1467 struct intel_gt_timelines *timelines = &i915->gt.timelines;
1468 struct intel_timeline *tl, *tn;
1469 unsigned long flags;
1472 spin_lock_irqsave(&timelines->lock, flags);
1473 list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
1474 if (!mutex_trylock(&tl->mutex))
1477 intel_timeline_get(tl);
1478 GEM_BUG_ON(!tl->active_count);
1479 tl->active_count++; /* pin the list element */
1480 spin_unlock_irqrestore(&timelines->lock, flags);
1482 retire_requests(tl);
1484 spin_lock_irqsave(&timelines->lock, flags);
1486 /* Resume iteration after dropping lock */
1487 list_safe_reset_next(tl, tn, link);
1488 if (!--tl->active_count)
1489 list_del(&tl->link);
1491 mutex_unlock(&tl->mutex);
1493 /* Defer the final release to after the spinlock */
1494 if (refcount_dec_and_test(&tl->kref.refcount)) {
1495 GEM_BUG_ON(tl->active_count);
1496 list_add(&tl->link, &free);
1499 spin_unlock_irqrestore(&timelines->lock, flags);
1501 list_for_each_entry_safe(tl, tn, &free, link)
1502 __intel_timeline_free(&tl->kref);
1504 return !list_empty(&timelines->active_list);
1507 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1508 #include "selftests/mock_request.c"
1509 #include "selftests/i915_request.c"
1512 static void i915_global_request_shrink(void)
1514 kmem_cache_shrink(global.slab_dependencies);
1515 kmem_cache_shrink(global.slab_execute_cbs);
1516 kmem_cache_shrink(global.slab_requests);
1519 static void i915_global_request_exit(void)
1521 kmem_cache_destroy(global.slab_dependencies);
1522 kmem_cache_destroy(global.slab_execute_cbs);
1523 kmem_cache_destroy(global.slab_requests);
1526 static struct i915_global_request global = { {
1527 .shrink = i915_global_request_shrink,
1528 .exit = i915_global_request_exit,
1531 int __init i915_global_request_init(void)
1533 global.slab_requests = KMEM_CACHE(i915_request,
1534 SLAB_HWCACHE_ALIGN |
1535 SLAB_RECLAIM_ACCOUNT |
1536 SLAB_TYPESAFE_BY_RCU);
1537 if (!global.slab_requests)
1540 global.slab_execute_cbs = KMEM_CACHE(execute_cb,
1541 SLAB_HWCACHE_ALIGN |
1542 SLAB_RECLAIM_ACCOUNT |
1543 SLAB_TYPESAFE_BY_RCU);
1544 if (!global.slab_execute_cbs)
1547 global.slab_dependencies = KMEM_CACHE(i915_dependency,
1548 SLAB_HWCACHE_ALIGN |
1549 SLAB_RECLAIM_ACCOUNT);
1550 if (!global.slab_dependencies)
1551 goto err_execute_cbs;
1553 i915_global_register(&global.base);
1557 kmem_cache_destroy(global.slab_execute_cbs);
1559 kmem_cache_destroy(global.slab_requests);