1 // SPDX-License-Identifier: MIT
3 * Copyright © 2014 Intel Corporation
6 #include <linux/circ_buf.h>
8 #include "gem/i915_gem_context.h"
9 #include "gt/gen8_engine_cs.h"
10 #include "gt/intel_breadcrumbs.h"
11 #include "gt/intel_context.h"
12 #include "gt/intel_engine_heartbeat.h"
13 #include "gt/intel_engine_pm.h"
14 #include "gt/intel_engine_regs.h"
15 #include "gt/intel_gpu_commands.h"
16 #include "gt/intel_gt.h"
17 #include "gt/intel_gt_clock_utils.h"
18 #include "gt/intel_gt_irq.h"
19 #include "gt/intel_gt_pm.h"
20 #include "gt/intel_gt_regs.h"
21 #include "gt/intel_gt_requests.h"
22 #include "gt/intel_lrc.h"
23 #include "gt/intel_lrc_reg.h"
24 #include "gt/intel_mocs.h"
25 #include "gt/intel_ring.h"
27 #include "intel_guc_ads.h"
28 #include "intel_guc_submission.h"
31 #include "i915_trace.h"
34 * DOC: GuC-based command submission
36 * The Scratch registers:
37 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
38 * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
39 * triggers an interrupt on the GuC via another register write (0xC4C8).
40 * Firmware writes a success/fail code back to the action register after
41 * it processes the request. The kernel driver polls waiting for this update and
44 * Command Transport buffers (CTBs):
45 * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host
46 * - G2H) are a message interface between the i915 and GuC.
48 * Context registration:
49 * Before a context can be submitted it must be registered with the GuC via an
50 * H2G. A unique guc_id is associated with each context. The context is either
51 * registered at request creation time (normal operation) or at submission time
52 * (abnormal operation, e.g. after a reset).
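 *
 * As a rough illustration (a sketch modelled on the register helpers later in
 * this file, not an additional code path), a single-lrc registration H2G is a
 * small action array sent over a CTB, where offset is the GGTT offset of the
 * context's guc_lrc_desc:
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_REGISTER_CONTEXT,
 *		ce->guc_id.id,
 *		offset,
 *	};
 *	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, loop);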
55 * The i915 updates the LRC tail value in memory. The i915 must enable the
56 * scheduling of the context within the GuC for the GuC to actually consider it.
57 * Therefore, the first time a disabled context is submitted we use a schedule
58 * enable H2G, while follow-up submissions are done via the context submit H2G,
59 * which informs the GuC that a previously enabled context has new work
63 * To unpin a context, an H2G is used to disable scheduling. When the
64 * corresponding G2H returns indicating the scheduling disable operation has
65 * completed it is safe to unpin the context. While a disable is in flight it
66 * isn't safe to resubmit the context so a fence is used to stall all future
67 * requests of that context until the G2H is returned.
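 *
 * Roughly, the disable side looks like the sketch below (assuming the
 * GUC_CONTEXT_DISABLE argument of the SCHED_CONTEXT_MODE_SET action; the
 * enable path in __guc_add_request() is the mirror image):
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
 *		ce->guc_id.id,
 *		GUC_CONTEXT_DISABLE,
 *	};
 *	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
 *				      G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
 *	... only unpin once the corresponding G2H has been received ...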
69 * Context deregistration:
70 * Before a context can be destroyed or if we steal its guc_id we must
71 * deregister the context with the GuC via H2G. If stealing the guc_id it isn't
72 * safe to submit anything to this guc_id until the deregister completes so a
73 * fence is used to stall all requests associated with this guc_id until the
74 * corresponding G2H returns indicating the guc_id has been deregistered.
76 * submission_state.guc_ids:
77 * Unique number associated with private GuC context data passed in during
78 * context registration / submission / deregistration. 64k are available. A
79 * simple ida is used for allocation.
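 *
 * For single-lrc contexts this boils down to something like the sketch below
 * (see new_guc_id() for the real allocation); multi-lrc ids come from a bitmap
 * instead, so that a contiguous block can be reserved:
 *
 *	ret = ida_simple_get(&guc->submission_state.guc_ids,
 *			     NUMBER_MULTI_LRC_GUC_ID(guc),
 *			     guc->submission_state.num_guc_ids,
 *			     GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 *	if (ret >= 0)
 *		ce->guc_id.id = ret;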
82 * If no guc_ids are available they can be stolen from another context at
83 * request creation time if that context is unpinned. If a guc_id can't be found
84 * we punt this problem to the user as we believe this is nearly impossible to hit
85 * during normal use cases.
88 * In the GuC submission code we have 3 basic spin locks which protect
89 * everything. Details about each below.
92 * This is the submission lock for all contexts that share an i915 schedule
93 * engine (sched_engine), thus only one of the contexts which share a
94 * sched_engine can be submitting at a time. Currently only one sched_engine is
95 * used for all of GuC submission but that could change in the future.
97 * guc->submission_state.lock
98 * Global lock for GuC submission state. Protects guc_ids and destroyed contexts
102 * Protects everything under ce->guc_state. Ensures that a context is in the
103 * correct state before issuing an H2G, e.g. we don't issue a schedule disable
104 * on a disabled context (bad idea), we don't issue a schedule enable when a
105 * schedule disable is in flight, etc... Also protects list of inflight requests
106 * on the context and the priority management state. Lock is individual to each
109 * Lock ordering rules:
110 * sched_engine->lock -> ce->guc_state.lock
111 * guc->submission_state.lock -> ce->guc_state.lock
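 *
 * In code the first of these orderings looks like the pattern below, which is
 * what e.g. __unwind_incomplete_requests() does:
 *
 *	spin_lock_irqsave(&sched_engine->lock, flags);
 *	spin_lock(&ce->guc_state.lock);
 *	... touch ce->guc_state.requests / priority state ...
 *	spin_unlock(&ce->guc_state.lock);
 *	spin_unlock_irqrestore(&sched_engine->lock, flags);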
114 * When a full GT reset is triggered it is assumed that some G2H responses to
115 * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
116 * fatal as we do certain operations upon receiving a G2H (e.g. destroy
117 * contexts, release guc_ids, etc...). When this occurs we can scrub the
118 * context state and clean up appropriately; however, this is quite racy.
119 * To avoid races, the reset code must disable submission before scrubbing for
120 * the missing G2H, while the submission code must check for submission being
121 * disabled and skip sending H2Gs and updating context states when it is. Both
122 * sides must also make sure to hold the relevant locks.
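 *
 * In practice the submission side follows a pattern along these lines (a
 * sketch; see the users of submission_disabled() below):
 *
 *	if (unlikely(submission_disabled(guc)))
 *		return;	... skip the H2G, the reset path will scrub the state ...
 *	... update ce->guc_state under ce->guc_state.lock and send the H2G ...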
125 /* GuC Virtual Engine */
126 struct guc_virtual_engine {
127 struct intel_engine_cs base;
128 struct intel_context context;
131 static struct intel_context *
132 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
133 unsigned long flags);
135 static struct intel_context *
136 guc_create_parallel(struct intel_engine_cs **engines,
137 unsigned int num_siblings,
140 #define GUC_REQUEST_SIZE 64 /* bytes */
143 * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
144 * per the GuC submission interface. A different allocation algorithm is used
145 * (bitmap vs. ida) between multi-lrc and single-lrc, hence the need to
146 * partition the guc_id space. We believe the number of multi-lrc contexts in
147 * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
150 #define NUMBER_MULTI_LRC_GUC_ID(guc) \
151 ((guc)->submission_state.num_guc_ids / 16)
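
/*
 * Illustrative sketch (an assumed helper, not used by the driver): with the
 * default 64k guc_ids, roughly the first 4k ids are reserved for multi-lrc
 * contexts and handed out from a bitmap, while the remainder are handed out
 * by the ida. new_guc_id() uses this same boundary as the lower limit of the
 * ida range.
 */
static inline u32 first_single_lrc_guc_id(struct intel_guc *guc)
{
	return NUMBER_MULTI_LRC_GUC_ID(guc);
}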
154 * Below is a set of functions which control the GuC scheduling state which
157 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0)
158 #define SCHED_STATE_DESTROYED BIT(1)
159 #define SCHED_STATE_PENDING_DISABLE BIT(2)
160 #define SCHED_STATE_BANNED BIT(3)
161 #define SCHED_STATE_ENABLED BIT(4)
162 #define SCHED_STATE_PENDING_ENABLE BIT(5)
163 #define SCHED_STATE_REGISTERED BIT(6)
164 #define SCHED_STATE_BLOCKED_SHIFT 7
165 #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
166 #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
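
/*
 * Worked example (illustrative): bits [7..18] of sched_state hold a block
 * count rather than a single flag. A context that is enabled and blocked twice
 * has sched_state == SCHED_STATE_ENABLED | (2 << SCHED_STATE_BLOCKED_SHIFT);
 * incr_context_blocked() adds SCHED_STATE_BLOCKED (BIT(7)), bumping the count
 * to 3 without disturbing the flag bits below it.
 */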
168 static inline void init_sched_state(struct intel_context *ce)
170 lockdep_assert_held(&ce->guc_state.lock);
171 ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
175 static bool sched_state_is_init(struct intel_context *ce)
177 /* Kernel contexts can have SCHED_STATE_REGISTERED after suspend. */
178 return !(ce->guc_state.sched_state &
179 ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
183 context_wait_for_deregister_to_register(struct intel_context *ce)
185 return ce->guc_state.sched_state &
186 SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
190 set_context_wait_for_deregister_to_register(struct intel_context *ce)
192 lockdep_assert_held(&ce->guc_state.lock);
193 ce->guc_state.sched_state |=
194 SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
198 clr_context_wait_for_deregister_to_register(struct intel_context *ce)
200 lockdep_assert_held(&ce->guc_state.lock);
201 ce->guc_state.sched_state &=
202 ~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
206 context_destroyed(struct intel_context *ce)
208 return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
212 set_context_destroyed(struct intel_context *ce)
214 lockdep_assert_held(&ce->guc_state.lock);
215 ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
218 static inline bool context_pending_disable(struct intel_context *ce)
220 return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
223 static inline void set_context_pending_disable(struct intel_context *ce)
225 lockdep_assert_held(&ce->guc_state.lock);
226 ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
229 static inline void clr_context_pending_disable(struct intel_context *ce)
231 lockdep_assert_held(&ce->guc_state.lock);
232 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
235 static inline bool context_banned(struct intel_context *ce)
237 return ce->guc_state.sched_state & SCHED_STATE_BANNED;
240 static inline void set_context_banned(struct intel_context *ce)
242 lockdep_assert_held(&ce->guc_state.lock);
243 ce->guc_state.sched_state |= SCHED_STATE_BANNED;
246 static inline void clr_context_banned(struct intel_context *ce)
248 lockdep_assert_held(&ce->guc_state.lock);
249 ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
252 static inline bool context_enabled(struct intel_context *ce)
254 return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
257 static inline void set_context_enabled(struct intel_context *ce)
259 lockdep_assert_held(&ce->guc_state.lock);
260 ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
263 static inline void clr_context_enabled(struct intel_context *ce)
265 lockdep_assert_held(&ce->guc_state.lock);
266 ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
269 static inline bool context_pending_enable(struct intel_context *ce)
271 return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
274 static inline void set_context_pending_enable(struct intel_context *ce)
276 lockdep_assert_held(&ce->guc_state.lock);
277 ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
280 static inline void clr_context_pending_enable(struct intel_context *ce)
282 lockdep_assert_held(&ce->guc_state.lock);
283 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
286 static inline bool context_registered(struct intel_context *ce)
288 return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
291 static inline void set_context_registered(struct intel_context *ce)
293 lockdep_assert_held(&ce->guc_state.lock);
294 ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
297 static inline void clr_context_registered(struct intel_context *ce)
299 lockdep_assert_held(&ce->guc_state.lock);
300 ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
303 static inline u32 context_blocked(struct intel_context *ce)
305 return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
306 SCHED_STATE_BLOCKED_SHIFT;
309 static inline void incr_context_blocked(struct intel_context *ce)
311 lockdep_assert_held(&ce->guc_state.lock);
313 ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
315 GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */
318 static inline void decr_context_blocked(struct intel_context *ce)
320 lockdep_assert_held(&ce->guc_state.lock);
322 GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
324 ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
327 static inline bool context_has_committed_requests(struct intel_context *ce)
329 return !!ce->guc_state.number_committed_requests;
332 static inline void incr_context_committed_requests(struct intel_context *ce)
334 lockdep_assert_held(&ce->guc_state.lock);
335 ++ce->guc_state.number_committed_requests;
336 GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
339 static inline void decr_context_committed_requests(struct intel_context *ce)
341 lockdep_assert_held(&ce->guc_state.lock);
342 --ce->guc_state.number_committed_requests;
343 GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
346 static struct intel_context *
347 request_to_scheduling_context(struct i915_request *rq)
349 return intel_context_to_parent(rq->context);
352 static inline bool context_guc_id_invalid(struct intel_context *ce)
354 return ce->guc_id.id == GUC_INVALID_LRC_ID;
357 static inline void set_context_guc_id_invalid(struct intel_context *ce)
359 ce->guc_id.id = GUC_INVALID_LRC_ID;
362 static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
364 return &ce->engine->gt->uc.guc;
367 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
369 return rb_entry(rb, struct i915_priolist, node);
373 * When using multi-lrc submission a scratch memory area is reserved in the
374 * parent's context state for the process descriptor, work queue, and handshake
375 * between the parent + children contexts to insert safe preemption points
376 * between each of the BBs. Currently the scratch area is sized to a page.
378 * The layout of this scratch area is below:
380 * + sizeof(struct guc_process_desc)         child go
381 * + CACHELINE_BYTES                         child join[0]
383 * + CACHELINE_BYTES                         child join[n - 1]
385 * PARENT_SCRATCH_SIZE / 2                   work queue start
387 * PARENT_SCRATCH_SIZE - 1                   work queue end
389 #define WQ_SIZE (PARENT_SCRATCH_SIZE / 2)
390 #define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE)
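
/*
 * For example, assuming the usual 4K PARENT_SCRATCH_SIZE: WQ_SIZE is 2K and
 * WQ_OFFSET is 2K, i.e. the work queue occupies the upper half of the scratch
 * area while the process descriptor and the join/go semaphores sit in the
 * lower half.
 */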
392 struct sync_semaphore {
394 u8 unused[CACHELINE_BYTES - sizeof(u32)];
397 struct parent_scratch {
398 struct guc_process_desc pdesc;
400 struct sync_semaphore go;
401 struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
403 u8 unused[WQ_OFFSET - sizeof(struct guc_process_desc) -
404 sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
406 u32 wq[WQ_SIZE / sizeof(u32)];
409 static u32 __get_parent_scratch_offset(struct intel_context *ce)
411 GEM_BUG_ON(!ce->parallel.guc.parent_page);
413 return ce->parallel.guc.parent_page * PAGE_SIZE;
416 static u32 __get_wq_offset(struct intel_context *ce)
418 BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
420 return __get_parent_scratch_offset(ce) + WQ_OFFSET;
423 static struct parent_scratch *
424 __get_parent_scratch(struct intel_context *ce)
426 BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
427 BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
430 * Need to subtract LRC_STATE_OFFSET here as the
431 * parallel.guc.parent_page is the offset into ce->state while
432 * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
434 return (struct parent_scratch *)
436 ((__get_parent_scratch_offset(ce) -
437 LRC_STATE_OFFSET) / sizeof(u32)));
440 static struct guc_process_desc *
441 __get_process_desc(struct intel_context *ce)
443 struct parent_scratch *ps = __get_parent_scratch(ce);
448 static u32 *get_wq_pointer(struct guc_process_desc *desc,
449 struct intel_context *ce,
453 * Check for space in the work queue. We cache the head pointer in the
454 * intel_context structure in order to reduce the number of accesses to
455 * shared GPU memory, which may be across a PCIe bus.
457 #define AVAILABLE_SPACE \
458 CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
459 if (wqi_size > AVAILABLE_SPACE) {
460 ce->parallel.guc.wqi_head = READ_ONCE(desc->head);
462 if (wqi_size > AVAILABLE_SPACE)
465 #undef AVAILABLE_SPACE
467 return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
470 static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
472 struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
474 GEM_BUG_ON(index >= GUC_MAX_LRC_DESCRIPTORS);
479 static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
481 struct intel_context *ce = xa_load(&guc->context_lookup, id);
483 GEM_BUG_ON(id >= GUC_MAX_LRC_DESCRIPTORS);
488 static int guc_lrc_desc_pool_create(struct intel_guc *guc)
493 size = PAGE_ALIGN(sizeof(struct guc_lrc_desc) *
494 GUC_MAX_LRC_DESCRIPTORS);
495 ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool,
496 (void **)&guc->lrc_desc_pool_vaddr);
503 static void guc_lrc_desc_pool_destroy(struct intel_guc *guc)
505 guc->lrc_desc_pool_vaddr = NULL;
506 i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP);
509 static inline bool guc_submission_initialized(struct intel_guc *guc)
511 return guc->submission_initialized;
514 static inline void _reset_lrc_desc(struct intel_guc *guc, u32 id)
516 struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
518 memset(desc, 0, sizeof(*desc));
521 static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
523 return __get_context(guc, id);
526 static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
527 struct intel_context *ce)
532 * The xarray API doesn't have an xa_store_irqsave wrapper, so call the
533 * lower level functions directly.
535 xa_lock_irqsave(&guc->context_lookup, flags);
536 __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
537 xa_unlock_irqrestore(&guc->context_lookup, flags);
540 static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
544 if (unlikely(!guc_submission_initialized(guc)))
547 _reset_lrc_desc(guc, id);
550 * The xarray API doesn't have an xa_erase_irqsave wrapper, so call
551 * the lower level functions directly.
553 xa_lock_irqsave(&guc->context_lookup, flags);
554 __xa_erase(&guc->context_lookup, id);
555 xa_unlock_irqrestore(&guc->context_lookup, flags);
558 static void decr_outstanding_submission_g2h(struct intel_guc *guc)
560 if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
561 wake_up_all(&guc->ct.wq);
564 static int guc_submission_send_busy_loop(struct intel_guc *guc,
571 * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
572 * so we don't handle the case where we don't get a reply because we
573 * aborted the send due to the channel being busy.
575 GEM_BUG_ON(g2h_len_dw && !loop);
578 atomic_inc(&guc->outstanding_submission_g2h);
580 return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
583 int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
588 const int state = interruptible ?
589 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
593 GEM_BUG_ON(timeout < 0);
595 if (!atomic_read(wait_var))
602 prepare_to_wait(&guc->ct.wq, &wait, state);
604 if (!atomic_read(wait_var))
607 if (signal_pending_state(state, current)) {
617 timeout = io_schedule_timeout(timeout);
619 finish_wait(&guc->ct.wq, &wait);
621 return (timeout < 0) ? timeout : 0;
624 int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
626 if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
629 return intel_guc_wait_for_pending_msg(guc,
630 &guc->outstanding_submission_g2h,
634 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
636 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
639 struct intel_context *ce = request_to_scheduling_context(rq);
645 lockdep_assert_held(&rq->engine->sched_engine->lock);
648 * Corner case where requests were sitting in the priority list or a
649 * request was resubmitted after the context was banned.
651 if (unlikely(intel_context_is_banned(ce))) {
652 i915_request_put(i915_request_mark_eio(rq));
653 intel_engine_signal_breadcrumbs(ce->engine);
657 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
658 GEM_BUG_ON(context_guc_id_invalid(ce));
660 spin_lock(&ce->guc_state.lock);
663 * The request / context will be run on the hardware when scheduling
664 * gets enabled in the unblock. For multi-lrc we still submit the
665 * context to move the LRC tails.
667 if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
670 enabled = context_enabled(ce) || context_blocked(ce);
673 action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
674 action[len++] = ce->guc_id.id;
675 action[len++] = GUC_CONTEXT_ENABLE;
676 set_context_pending_enable(ce);
677 intel_context_get(ce);
678 g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
680 action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
681 action[len++] = ce->guc_id.id;
684 err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
685 if (!enabled && !err) {
686 trace_intel_context_sched_enable(ce);
687 atomic_inc(&guc->outstanding_submission_g2h);
688 set_context_enabled(ce);
691 * Without multi-lrc, the KMD does the submission step (moving the
692 * lrc tail) so enabling scheduling is sufficient to submit the
693 * context. This isn't the case in multi-lrc submission as the
694 * GuC needs to move the tails, hence the need for another H2G
695 * to submit a multi-lrc context after enabling scheduling.
697 if (intel_context_is_parent(ce)) {
698 action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
699 err = intel_guc_send_nb(guc, action, len - 1, 0);
701 } else if (!enabled) {
702 clr_context_pending_enable(ce);
703 intel_context_put(ce);
706 trace_i915_request_guc_submit(rq);
709 spin_unlock(&ce->guc_state.lock);
713 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
715 int ret = __guc_add_request(guc, rq);
717 if (unlikely(ret == -EBUSY)) {
718 guc->stalled_request = rq;
719 guc->submission_stall_reason = STALL_ADD_REQUEST;
725 static inline void guc_set_lrc_tail(struct i915_request *rq)
727 rq->context->lrc_reg_state[CTX_RING_TAIL] =
728 intel_ring_set_tail(rq->ring, rq->tail);
731 static inline int rq_prio(const struct i915_request *rq)
733 return rq->sched.attr.priority;
736 static bool is_multi_lrc_rq(struct i915_request *rq)
738 return intel_context_is_parallel(rq->context);
741 static bool can_merge_rq(struct i915_request *rq,
742 struct i915_request *last)
744 return request_to_scheduling_context(rq) ==
745 request_to_scheduling_context(last);
748 static u32 wq_space_until_wrap(struct intel_context *ce)
750 return (WQ_SIZE - ce->parallel.guc.wqi_tail);
753 static void write_wqi(struct guc_process_desc *desc,
754 struct intel_context *ce,
757 BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
760 * Ensure WQIs are visible before updating the tail
762 intel_guc_write_barrier(ce_to_guc(ce));
764 ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
766 WRITE_ONCE(desc->tail, ce->parallel.guc.wqi_tail);
769 static int guc_wq_noop_append(struct intel_context *ce)
771 struct guc_process_desc *desc = __get_process_desc(ce);
772 u32 *wqi = get_wq_pointer(desc, ce, wq_space_until_wrap(ce));
773 u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
778 GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
780 *wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
781 FIELD_PREP(WQ_LEN_MASK, len_dw);
782 ce->parallel.guc.wqi_tail = 0;
787 static int __guc_wq_item_append(struct i915_request *rq)
789 struct intel_context *ce = request_to_scheduling_context(rq);
790 struct intel_context *child;
791 struct guc_process_desc *desc = __get_process_desc(ce);
792 unsigned int wqi_size = (ce->parallel.number_children + 4) *
795 u32 len_dw = (wqi_size / sizeof(u32)) - 1;
798 /* Ensure the context is in the correct state before updating the work queue */
799 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
800 GEM_BUG_ON(context_guc_id_invalid(ce));
801 GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
802 GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
804 /* Insert NOOP if this work queue item will wrap the tail pointer. */
805 if (wqi_size > wq_space_until_wrap(ce)) {
806 ret = guc_wq_noop_append(ce);
811 wqi = get_wq_pointer(desc, ce, wqi_size);
815 GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
817 *wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
818 FIELD_PREP(WQ_LEN_MASK, len_dw);
819 *wqi++ = ce->lrc.lrca;
820 *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
821 FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
822 *wqi++ = 0; /* fence_id */
823 for_each_child(ce, child)
824 *wqi++ = child->ring->tail / sizeof(u64);
826 write_wqi(desc, ce, wqi_size);
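
	/*
	 * For reference (an illustrative layout derived from the stores above):
	 * a parent with two children emits a six dword WQI -
	 *   dw0: WQ_TYPE_MULTI_LRC | len_dw = 5
	 *   dw1: parent lrca
	 *   dw2: parent guc_id | parent ring tail (in qwords)
	 *   dw3: fence_id (0)
	 *   dw4: child[0] ring tail (in qwords)
	 *   dw5: child[1] ring tail (in qwords)
	 */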
831 static int guc_wq_item_append(struct intel_guc *guc,
832 struct i915_request *rq)
834 struct intel_context *ce = request_to_scheduling_context(rq);
837 if (likely(!intel_context_is_banned(ce))) {
838 ret = __guc_wq_item_append(rq);
840 if (unlikely(ret == -EBUSY)) {
841 guc->stalled_request = rq;
842 guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
849 static bool multi_lrc_submit(struct i915_request *rq)
851 struct intel_context *ce = request_to_scheduling_context(rq);
853 intel_ring_set_tail(rq->ring, rq->tail);
856 * We expect the front end (execbuf IOCTL) to set this flag on the last
857 * request generated from a multi-BB submission. This indicates to the
858 * backend (GuC interface) that we should submit this context thus
859 * submitting all the requests generated in parallel.
861 return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
862 intel_context_is_banned(ce);
865 static int guc_dequeue_one_context(struct intel_guc *guc)
867 struct i915_sched_engine * const sched_engine = guc->sched_engine;
868 struct i915_request *last = NULL;
873 lockdep_assert_held(&sched_engine->lock);
875 if (guc->stalled_request) {
877 last = guc->stalled_request;
879 switch (guc->submission_stall_reason) {
880 case STALL_REGISTER_CONTEXT:
881 goto register_context;
882 case STALL_MOVE_LRC_TAIL:
884 case STALL_ADD_REQUEST:
887 MISSING_CASE(guc->submission_stall_reason);
891 while ((rb = rb_first_cached(&sched_engine->queue))) {
892 struct i915_priolist *p = to_priolist(rb);
893 struct i915_request *rq, *rn;
895 priolist_for_each_request_consume(rq, rn, p) {
896 if (last && !can_merge_rq(rq, last))
897 goto register_context;
899 list_del_init(&rq->sched.link);
901 __i915_request_submit(rq);
903 trace_i915_request_in(rq, 0);
906 if (is_multi_lrc_rq(rq)) {
908 * We need to coalesce all multi-lrc requests in
909 * a relationship into a single H2G. We are
910 * guaranteed that all of these requests will be
911 * submitted sequentially.
913 if (multi_lrc_submit(rq)) {
915 goto register_context;
922 rb_erase_cached(&p->node, &sched_engine->queue);
923 i915_priolist_free(p);
928 struct intel_context *ce = request_to_scheduling_context(last);
930 if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
931 !intel_context_is_banned(ce))) {
932 ret = guc_lrc_desc_pin(ce, false);
933 if (unlikely(ret == -EPIPE)) {
935 } else if (ret == -EBUSY) {
936 guc->stalled_request = last;
937 guc->submission_stall_reason =
938 STALL_REGISTER_CONTEXT;
939 goto schedule_tasklet;
940 } else if (ret != 0) {
941 GEM_WARN_ON(ret); /* Unexpected */
947 if (is_multi_lrc_rq(last)) {
948 ret = guc_wq_item_append(guc, last);
950 goto schedule_tasklet;
951 } else if (ret != 0) {
952 GEM_WARN_ON(ret); /* Unexpected */
956 guc_set_lrc_tail(last);
960 ret = guc_add_request(guc, last);
961 if (unlikely(ret == -EPIPE)) {
963 } else if (ret == -EBUSY) {
964 goto schedule_tasklet;
965 } else if (ret != 0) {
966 GEM_WARN_ON(ret); /* Unexpected */
971 guc->stalled_request = NULL;
972 guc->submission_stall_reason = STALL_NONE;
976 sched_engine->tasklet.callback = NULL;
977 tasklet_disable_nosync(&sched_engine->tasklet);
981 tasklet_schedule(&sched_engine->tasklet);
985 static void guc_submission_tasklet(struct tasklet_struct *t)
987 struct i915_sched_engine *sched_engine =
988 from_tasklet(sched_engine, t, tasklet);
992 spin_lock_irqsave(&sched_engine->lock, flags);
995 loop = guc_dequeue_one_context(sched_engine->private_data);
998 i915_sched_engine_reset_on_empty(sched_engine);
1000 spin_unlock_irqrestore(&sched_engine->lock, flags);
1003 static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
1005 if (iir & GT_RENDER_USER_INTERRUPT)
1006 intel_engine_signal_breadcrumbs(engine);
1009 static void __guc_context_destroy(struct intel_context *ce);
1010 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
1011 static void guc_signal_context_fence(struct intel_context *ce);
1012 static void guc_cancel_context_requests(struct intel_context *ce);
1013 static void guc_blocked_fence_complete(struct intel_context *ce);
1015 static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
1017 struct intel_context *ce;
1018 unsigned long index, flags;
1019 bool pending_disable, pending_enable, deregister, destroyed, banned;
1021 xa_lock_irqsave(&guc->context_lookup, flags);
1022 xa_for_each(&guc->context_lookup, index, ce) {
1024 * Corner case where the ref count on the object is zero but the
1025 * deregister G2H was lost. In this case we don't touch the ref
1026 * count and finish the destroy of the context.
1028 bool do_put = kref_get_unless_zero(&ce->ref);
1030 xa_unlock(&guc->context_lookup);
1032 spin_lock(&ce->guc_state.lock);
1035 * Once we are at this point submission_disabled() is guaranteed
1036 * to be visible to all callers who set the below flags (see above
1037 * flush and flushes in reset_prepare). If submission_disabled()
1038 * is set, the caller shouldn't set these flags.
1041 destroyed = context_destroyed(ce);
1042 pending_enable = context_pending_enable(ce);
1043 pending_disable = context_pending_disable(ce);
1044 deregister = context_wait_for_deregister_to_register(ce);
1045 banned = context_banned(ce);
1046 init_sched_state(ce);
1048 spin_unlock(&ce->guc_state.lock);
1050 if (pending_enable || destroyed || deregister) {
1051 decr_outstanding_submission_g2h(guc);
1053 guc_signal_context_fence(ce);
1055 intel_gt_pm_put_async(guc_to_gt(guc));
1056 release_guc_id(guc, ce);
1057 __guc_context_destroy(ce);
1059 if (pending_enable || deregister)
1060 intel_context_put(ce);
1063 /* Not mutually exclusive with the above if statement. */
1064 if (pending_disable) {
1065 guc_signal_context_fence(ce);
1067 guc_cancel_context_requests(ce);
1068 intel_engine_signal_breadcrumbs(ce->engine);
1070 intel_context_sched_disable_unpin(ce);
1071 decr_outstanding_submission_g2h(guc);
1073 spin_lock(&ce->guc_state.lock);
1074 guc_blocked_fence_complete(ce);
1075 spin_unlock(&ce->guc_state.lock);
1077 intel_context_put(ce);
1081 intel_context_put(ce);
1082 xa_lock(&guc->context_lookup);
1084 xa_unlock_irqrestore(&guc->context_lookup, flags);
1088 * GuC stores busyness stats for each engine at context in/out boundaries. A
1089 * context 'in' logs execution start time, 'out' adds in -> out delta to total.
1090 * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
1093 * __i915_pmu_event_read samples engine busyness. When sampling, if context id
1094 * is valid (!= ~0) and start is non-zero, the engine is considered to be
1095 * active. For an active engine total busyness = total + (now - start), where
1096 * 'now' is the time at which the busyness is sampled. For inactive engine,
1097 * total busyness = total.
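 *
 * A small worked example: if 'total' is 1000 gt clocks, the context switched
 * in at 'start' = 200 and the sampled gt timestamp is 500, the reported
 * busyness is 1000 + (500 - 200) = 1300 gt clocks (converted to ns before
 * being returned).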
1099 * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
1101 * The start and total values provided by GuC are 32 bits and wrap around in a
1102 * few minutes. Since perf pmu provides busyness as 64 bit monotonically
1103 * increasing ns values, there is a need for this implementation to account for
1104 * overflows and extend the GuC provided values to 64 bits before returning
1105 * busyness to the user. In order to do that, a worker runs periodically with
1106 * a period of 1/8th the time it takes for the timestamp to wrap (i.e. roughly
1107 * every 28 seconds for a gt clock frequency of 19.2 MHz).
1110 #define WRAP_TIME_CLKS U32_MAX
1111 #define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
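
/*
 * Illustrative sketch (an assumed helper, not part of the driver) showing how
 * the ping period used below is derived: at a 19.2 MHz gt clock the 32 bit
 * timestamp wraps after ~224 s, so the worker runs roughly every 28 s.
 */
static inline unsigned long guc_busyness_ping_delay(struct intel_gt *gt)
{
	return (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
}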
1114 __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
1116 u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1117 u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
1119 if (new_start == lower_32_bits(*prev_start))
1123 * When gt is unparked, we update the gt timestamp and start the ping
1124 * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
1125 * is unparked, all switched in contexts will have a start time that is
1126 * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
1128 * If neither gt_stamp nor new_start has rolled over, then the
1129 * gt_stamp_hi does not need to be adjusted, however if one of them has
1130 * rolled over, we need to adjust gt_stamp_hi accordingly.
1132 * The below conditions address the cases of new_start rollover and
1133 * gt_stamp_last rollover respectively.
1135 if (new_start < gt_stamp_last &&
1136 (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
1139 if (new_start > gt_stamp_last &&
1140 (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
1143 *prev_start = ((u64)gt_stamp_hi << 32) | new_start;
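
/*
 * Worked example (illustrative): if guc->timestamp.gt_stamp is 0x2_0000_0100
 * and a context switched in at new_start = 0xffffff00 (just before the 32 bit
 * wrap), the second test above fires, gt_stamp_hi is decremented and the
 * extended start time becomes 0x1_ffff_ff00.
 */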
1146 #define record_read(map_, field_) \
1147 iosys_map_rd_field(map_, 0, struct guc_engine_usage_record, field_)
1150 * GuC updates shared memory and KMD reads it. Since this is not synchronized,
1151 * we run into a race where the value read is inconsistent. Sometimes the
1152 * inconsistency is in reading the most significant bytes of the last_in value
1153 * when this race occurs. Two types of cases are seen: the upper 8 bits are
1154 * zero, or the upper 24 bits are zero. Since the truncated values are still
1155 * non-zero, it is non-trivial to determine their validity. Instead we read the
1156 * values multiple times until they are consistent. In test runs, 3 attempts
1157 * yield consistent values. The upper bound is set to 6 attempts and may need
1158 * to be tuned as per any new occurrences.
1160 static void __get_engine_usage_record(struct intel_engine_cs *engine,
1161 u32 *last_in, u32 *id, u32 *total)
1163 struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
1167 *last_in = record_read(&rec_map, last_switch_in_stamp);
1168 *id = record_read(&rec_map, current_context_index);
1169 *total = record_read(&rec_map, total_runtime);
1171 if (record_read(&rec_map, last_switch_in_stamp) == *last_in &&
1172 record_read(&rec_map, current_context_index) == *id &&
1173 record_read(&rec_map, total_runtime) == *total)
1178 static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
1180 struct intel_engine_guc_stats *stats = &engine->stats.guc;
1181 struct intel_guc *guc = &engine->gt->uc.guc;
1182 u32 last_switch, ctx_id, total;
1184 lockdep_assert_held(&guc->timestamp.lock);
1186 __get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
1188 stats->running = ctx_id != ~0U && last_switch;
1190 __extend_last_switch(guc, &stats->start_gt_clk, last_switch);
1193 * Instead of adjusting the total for overflow, just add the
1194 * difference from the previous sample to stats->total_gt_clks
1196 if (total && total != ~0U) {
1197 stats->total_gt_clks += (u32)(total - stats->prev_total);
1198 stats->prev_total = total;
1202 static u32 gpm_timestamp_shift(struct intel_gt *gt)
1204 intel_wakeref_t wakeref;
1207 with_intel_runtime_pm(gt->uncore->rpm, wakeref)
1208 reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
1210 shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
1211 GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
1216 static u64 gpm_timestamp(struct intel_gt *gt)
1218 u32 lo, hi, old_hi, loop = 0;
1220 hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
1222 lo = intel_uncore_read(gt->uncore, MISC_STATUS0);
1224 hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
1225 } while (old_hi != hi && loop++ < 2);
1227 return ((u64)hi << 32) | lo;
1230 static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
1232 struct intel_gt *gt = guc_to_gt(guc);
1233 u32 gt_stamp_lo, gt_stamp_hi;
1236 lockdep_assert_held(&guc->timestamp.lock);
1238 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1239 gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift;
1240 gt_stamp_lo = lower_32_bits(gpm_ts);
1243 if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
1246 guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
1250 * Unlike the execlist mode of submission, total and active times are in terms of
1251 * gt clocks. The *now parameter is retained to return the cpu time at which the
1252 * busyness was sampled.
1254 static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
1256 struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
1257 struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
1258 struct intel_gt *gt = engine->gt;
1259 struct intel_guc *guc = >->uc.guc;
1260 u64 total, gt_stamp_saved;
1261 unsigned long flags;
1265 spin_lock_irqsave(&guc->timestamp.lock, flags);
1268 * If a reset happened, we risk reading partially updated engine
1269 * busyness from GuC, so we just use the driver stored copy of busyness.
1270 * Synchronize with gt reset using reset_count and the
1271 * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count
1272 * after I915_RESET_BACKOFF flag, so ensure that the reset_count is
1273 * usable by checking the flag afterwards.
1275 reset_count = i915_reset_count(gpu_error);
1276 in_reset = test_bit(I915_RESET_BACKOFF, >->reset.flags);
1281 * The active busyness depends on start_gt_clk and gt_stamp.
1282 * gt_stamp is updated by i915 only when gt is awake and the
1283 * start_gt_clk is derived from GuC state. To get a consistent
1284 * view of activity, we query the GuC state only if gt is awake.
1286 if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
1287 stats_saved = *stats;
1288 gt_stamp_saved = guc->timestamp.gt_stamp;
1290 * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
1291 * start_gt_clk' calculation below for active engines.
1293 guc_update_engine_gt_clks(engine);
1294 guc_update_pm_timestamp(guc, now);
1295 intel_gt_pm_put_async(gt);
1296 if (i915_reset_count(gpu_error) != reset_count) {
1297 *stats = stats_saved;
1298 guc->timestamp.gt_stamp = gt_stamp_saved;
1302 total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
1303 if (stats->running) {
1304 u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
1306 total += intel_gt_clock_interval_to_ns(gt, clk);
1309 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1311 return ns_to_ktime(total);
1314 static void __reset_guc_busyness_stats(struct intel_guc *guc)
1316 struct intel_gt *gt = guc_to_gt(guc);
1317 struct intel_engine_cs *engine;
1318 enum intel_engine_id id;
1319 unsigned long flags;
1322 cancel_delayed_work_sync(&guc->timestamp.work);
1324 spin_lock_irqsave(&guc->timestamp.lock, flags);
1326 guc_update_pm_timestamp(guc, &unused);
1327 for_each_engine(engine, gt, id) {
1328 guc_update_engine_gt_clks(engine);
1329 engine->stats.guc.prev_total = 0;
1332 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1335 static void __update_guc_busyness_stats(struct intel_guc *guc)
1337 struct intel_gt *gt = guc_to_gt(guc);
1338 struct intel_engine_cs *engine;
1339 enum intel_engine_id id;
1340 unsigned long flags;
1343 spin_lock_irqsave(&guc->timestamp.lock, flags);
1345 guc_update_pm_timestamp(guc, &unused);
1346 for_each_engine(engine, gt, id)
1347 guc_update_engine_gt_clks(engine);
1349 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1352 static void guc_timestamp_ping(struct work_struct *wrk)
1354 struct intel_guc *guc = container_of(wrk, typeof(*guc),
1355 timestamp.work.work);
1356 struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
1357 struct intel_gt *gt = guc_to_gt(guc);
1358 intel_wakeref_t wakeref;
1362 * Synchronize with gt reset to make sure the worker does not
1363 * corrupt the engine/guc stats.
1365 ret = intel_gt_reset_trylock(gt, &srcu);
1369 with_intel_runtime_pm(>->i915->runtime_pm, wakeref)
1370 __update_guc_busyness_stats(guc);
1372 intel_gt_reset_unlock(gt, srcu);
1374 mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1375 guc->timestamp.ping_delay);
1378 static int guc_action_enable_usage_stats(struct intel_guc *guc)
1380 u32 offset = intel_guc_engine_usage_offset(guc);
1382 INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
1387 return intel_guc_send(guc, action, ARRAY_SIZE(action));
1390 static void guc_init_engine_stats(struct intel_guc *guc)
1392 struct intel_gt *gt = guc_to_gt(guc);
1393 intel_wakeref_t wakeref;
1395 mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1396 guc->timestamp.ping_delay);
1398 with_intel_runtime_pm(>->i915->runtime_pm, wakeref) {
1399 int ret = guc_action_enable_usage_stats(guc);
1402 drm_err(>->i915->drm,
1403 "Failed to enable usage stats: %d!\n", ret);
1407 void intel_guc_busyness_park(struct intel_gt *gt)
1409 struct intel_guc *guc = >->uc.guc;
1411 if (!guc_submission_initialized(guc))
1414 cancel_delayed_work(&guc->timestamp.work);
1415 __update_guc_busyness_stats(guc);
1418 void intel_guc_busyness_unpark(struct intel_gt *gt)
1420 struct intel_guc *guc = >->uc.guc;
1421 unsigned long flags;
1424 if (!guc_submission_initialized(guc))
1427 spin_lock_irqsave(&guc->timestamp.lock, flags);
1428 guc_update_pm_timestamp(guc, &unused);
1429 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1430 mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1431 guc->timestamp.ping_delay);
1435 submission_disabled(struct intel_guc *guc)
1437 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1439 return unlikely(!sched_engine ||
1440 !__tasklet_is_enabled(&sched_engine->tasklet) ||
1441 intel_gt_is_wedged(guc_to_gt(guc)));
1444 static void disable_submission(struct intel_guc *guc)
1446 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1448 if (__tasklet_is_enabled(&sched_engine->tasklet)) {
1449 GEM_BUG_ON(!guc->ct.enabled);
1450 __tasklet_disable_sync_once(&sched_engine->tasklet);
1451 sched_engine->tasklet.callback = NULL;
1455 static void enable_submission(struct intel_guc *guc)
1457 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1458 unsigned long flags;
1460 spin_lock_irqsave(&guc->sched_engine->lock, flags);
1461 sched_engine->tasklet.callback = guc_submission_tasklet;
1462 wmb(); /* Make sure callback visible */
1463 if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
1464 __tasklet_enable(&sched_engine->tasklet)) {
1465 GEM_BUG_ON(!guc->ct.enabled);
1467 /* And kick in case we missed a new request submission. */
1468 tasklet_hi_schedule(&sched_engine->tasklet);
1470 spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
1473 static void guc_flush_submissions(struct intel_guc *guc)
1475 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1476 unsigned long flags;
1478 spin_lock_irqsave(&sched_engine->lock, flags);
1479 spin_unlock_irqrestore(&sched_engine->lock, flags);
1482 static void guc_flush_destroyed_contexts(struct intel_guc *guc);
1484 void intel_guc_submission_reset_prepare(struct intel_guc *guc)
1486 if (unlikely(!guc_submission_initialized(guc))) {
1487 /* Reset called during driver load? GuC not yet initialised! */
1491 intel_gt_park_heartbeats(guc_to_gt(guc));
1492 disable_submission(guc);
1493 guc->interrupts.disable(guc);
1494 __reset_guc_busyness_stats(guc);
1496 /* Flush IRQ handler */
1497 spin_lock_irq(&guc_to_gt(guc)->irq_lock);
1498 spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
1500 guc_flush_submissions(guc);
1501 guc_flush_destroyed_contexts(guc);
1502 flush_work(&guc->ct.requests.worker);
1504 scrub_guc_desc_for_outstanding_g2h(guc);
1507 static struct intel_engine_cs *
1508 guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
1510 struct intel_engine_cs *engine;
1511 intel_engine_mask_t tmp, mask = ve->mask;
1512 unsigned int num_siblings = 0;
1514 for_each_engine_masked(engine, ve->gt, mask, tmp)
1515 if (num_siblings++ == sibling)
1521 static inline struct intel_engine_cs *
1522 __context_to_physical_engine(struct intel_context *ce)
1524 struct intel_engine_cs *engine = ce->engine;
1526 if (intel_engine_is_virtual(engine))
1527 engine = guc_virtual_get_sibling(engine, 0);
1532 static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
1534 struct intel_engine_cs *engine = __context_to_physical_engine(ce);
1536 if (intel_context_is_banned(ce))
1539 GEM_BUG_ON(!intel_context_is_pinned(ce));
1542 * We want a simple context + ring to execute the breadcrumb update.
1543 * We cannot rely on the context being intact across the GPU hang,
1544 * so clear it and rebuild just what we need for the breadcrumb.
1545 * All pending requests for this context will be zapped, and any
1546 * future request will be after userspace has had the opportunity
1547 * to recreate its own state.
1550 lrc_init_regs(ce, engine, true);
1552 /* Rerun the request; its payload has been neutered (if guilty). */
1553 lrc_update_regs(ce, engine, head);
1556 static void guc_reset_nop(struct intel_engine_cs *engine)
1560 static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
1565 __unwind_incomplete_requests(struct intel_context *ce)
1567 struct i915_request *rq, *rn;
1568 struct list_head *pl;
1569 int prio = I915_PRIORITY_INVALID;
1570 struct i915_sched_engine * const sched_engine =
1571 ce->engine->sched_engine;
1572 unsigned long flags;
1574 spin_lock_irqsave(&sched_engine->lock, flags);
1575 spin_lock(&ce->guc_state.lock);
1576 list_for_each_entry_safe_reverse(rq, rn,
1577 &ce->guc_state.requests,
1579 if (i915_request_completed(rq))
1582 list_del_init(&rq->sched.link);
1583 __i915_request_unsubmit(rq);
1585 /* Push the request back into the queue for later resubmission. */
1586 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
1587 if (rq_prio(rq) != prio) {
1589 pl = i915_sched_lookup_priolist(sched_engine, prio);
1591 GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
1593 list_add(&rq->sched.link, pl);
1594 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1596 spin_unlock(&ce->guc_state.lock);
1597 spin_unlock_irqrestore(&sched_engine->lock, flags);
1600 static void __guc_reset_context(struct intel_context *ce, bool stalled)
1603 struct i915_request *rq;
1604 unsigned long flags;
1606 int i, number_children = ce->parallel.number_children;
1607 struct intel_context *parent = ce;
1609 GEM_BUG_ON(intel_context_is_child(ce));
1611 intel_context_get(ce);
1614 * GuC will implicitly mark the context as non-schedulable when it sends
1615 * the reset notification. Make sure our state reflects this change. The
1616 * context will be marked enabled on resubmission.
1618 spin_lock_irqsave(&ce->guc_state.lock, flags);
1619 clr_context_enabled(ce);
1620 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1623 * For each context in the relationship find the hanging request
1624 * resetting each context / request as needed
1626 for (i = 0; i < number_children + 1; ++i) {
1627 if (!intel_context_is_pinned(ce))
1630 local_stalled = false;
1631 rq = intel_context_find_active_request(ce);
1633 head = ce->ring->tail;
1637 if (i915_request_started(rq))
1638 local_stalled = true;
1640 GEM_BUG_ON(i915_active_is_idle(&ce->active));
1641 head = intel_ring_wrap(ce->ring, rq->head);
1643 __i915_request_reset(rq, local_stalled && stalled);
1645 guc_reset_state(ce, head, local_stalled && stalled);
1647 if (i != number_children)
1648 ce = list_next_entry(ce, parallel.child_link);
1651 __unwind_incomplete_requests(parent);
1652 intel_context_put(parent);
1655 void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
1657 struct intel_context *ce;
1658 unsigned long index;
1659 unsigned long flags;
1661 if (unlikely(!guc_submission_initialized(guc))) {
1662 /* Reset called during driver load? GuC not yet initialised! */
1666 xa_lock_irqsave(&guc->context_lookup, flags);
1667 xa_for_each(&guc->context_lookup, index, ce) {
1668 if (!kref_get_unless_zero(&ce->ref))
1671 xa_unlock(&guc->context_lookup);
1673 if (intel_context_is_pinned(ce) &&
1674 !intel_context_is_child(ce))
1675 __guc_reset_context(ce, stalled);
1677 intel_context_put(ce);
1679 xa_lock(&guc->context_lookup);
1681 xa_unlock_irqrestore(&guc->context_lookup, flags);
1683 /* GuC is blown away, drop all references to contexts */
1684 xa_destroy(&guc->context_lookup);
1687 static void guc_cancel_context_requests(struct intel_context *ce)
1689 struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
1690 struct i915_request *rq;
1691 unsigned long flags;
1693 /* Mark all executing requests as skipped. */
1694 spin_lock_irqsave(&sched_engine->lock, flags);
1695 spin_lock(&ce->guc_state.lock);
1696 list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
1697 i915_request_put(i915_request_mark_eio(rq));
1698 spin_unlock(&ce->guc_state.lock);
1699 spin_unlock_irqrestore(&sched_engine->lock, flags);
1703 guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
1705 struct i915_request *rq, *rn;
1707 unsigned long flags;
1709 /* Can be called during boot if GuC fails to load */
1714 * Before we call engine->cancel_requests(), we should have exclusive
1715 * access to the submission state. This is arranged for us by the
1716 * caller disabling the interrupt generation, the tasklet and other
1717 * threads that may then access the same state, giving us a free hand
1718 * to reset state. However, we still need to let lockdep be aware that
1719 * we know this state may be accessed in hardirq context, so we
1720 * disable the irq around this manipulation and we want to keep
1721 * the spinlock focused on its duties and not accidentally conflate
1722 * coverage to the submission's irq state. (Similarly, although we
1723 * shouldn't need to disable irq around the manipulation of the
1724 * submission's irq state, we also wish to remind ourselves that
1727 spin_lock_irqsave(&sched_engine->lock, flags);
1729 /* Flush the queued requests to the timeline list (for retiring). */
1730 while ((rb = rb_first_cached(&sched_engine->queue))) {
1731 struct i915_priolist *p = to_priolist(rb);
1733 priolist_for_each_request_consume(rq, rn, p) {
1734 list_del_init(&rq->sched.link);
1736 __i915_request_submit(rq);
1738 i915_request_put(i915_request_mark_eio(rq));
1741 rb_erase_cached(&p->node, &sched_engine->queue);
1742 i915_priolist_free(p);
1745 /* Remaining _unready_ requests will be nop'ed when submitted */
1747 sched_engine->queue_priority_hint = INT_MIN;
1748 sched_engine->queue = RB_ROOT_CACHED;
1750 spin_unlock_irqrestore(&sched_engine->lock, flags);
1753 void intel_guc_submission_cancel_requests(struct intel_guc *guc)
1755 struct intel_context *ce;
1756 unsigned long index;
1757 unsigned long flags;
1759 xa_lock_irqsave(&guc->context_lookup, flags);
1760 xa_for_each(&guc->context_lookup, index, ce) {
1761 if (!kref_get_unless_zero(&ce->ref))
1764 xa_unlock(&guc->context_lookup);
1766 if (intel_context_is_pinned(ce) &&
1767 !intel_context_is_child(ce))
1768 guc_cancel_context_requests(ce);
1770 intel_context_put(ce);
1772 xa_lock(&guc->context_lookup);
1774 xa_unlock_irqrestore(&guc->context_lookup, flags);
1776 guc_cancel_sched_engine_requests(guc->sched_engine);
1778 /* GuC is blown away, drop all references to contexts */
1779 xa_destroy(&guc->context_lookup);
1782 void intel_guc_submission_reset_finish(struct intel_guc *guc)
1784 /* Reset called during driver load or during wedge? */
1785 if (unlikely(!guc_submission_initialized(guc) ||
1786 intel_gt_is_wedged(guc_to_gt(guc)))) {
1791 * Technically possible for either of these values to be non-zero here,
1792 * but very unlikely + harmless. Regardless let's add a warn so we can
1793 * see in CI if this happens frequently / a precursor to taking down the
1796 GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
1797 atomic_set(&guc->outstanding_submission_g2h, 0);
1799 intel_guc_global_policies_update(guc);
1800 enable_submission(guc);
1801 intel_gt_unpark_heartbeats(guc_to_gt(guc));
1804 static void destroyed_worker_func(struct work_struct *w);
1805 static void reset_fail_worker_func(struct work_struct *w);
1808 * Set up the memory resources to be shared with the GuC (via the GGTT)
1809 * at firmware loading time.
1811 int intel_guc_submission_init(struct intel_guc *guc)
1813 struct intel_gt *gt = guc_to_gt(guc);
1816 if (guc->submission_initialized)
1819 ret = guc_lrc_desc_pool_create(guc);
1823 * Keep static analysers happy, let them know that we allocated the
1824 * vma after testing that it didn't exist earlier.
1826 GEM_BUG_ON(!guc->lrc_desc_pool);
1828 guc->submission_state.guc_ids_bitmap =
1829 bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
1830 if (!guc->submission_state.guc_ids_bitmap)
1833 guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
1834 guc->timestamp.shift = gpm_timestamp_shift(gt);
1835 guc->submission_initialized = true;
1840 void intel_guc_submission_fini(struct intel_guc *guc)
1842 if (!guc->submission_initialized)
1845 guc_flush_destroyed_contexts(guc);
1846 guc_lrc_desc_pool_destroy(guc);
1847 i915_sched_engine_put(guc->sched_engine);
1848 bitmap_free(guc->submission_state.guc_ids_bitmap);
1849 guc->submission_initialized = false;
1852 static inline void queue_request(struct i915_sched_engine *sched_engine,
1853 struct i915_request *rq,
1856 GEM_BUG_ON(!list_empty(&rq->sched.link));
1857 list_add_tail(&rq->sched.link,
1858 i915_sched_lookup_priolist(sched_engine, prio));
1859 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1860 tasklet_hi_schedule(&sched_engine->tasklet);
1863 static int guc_bypass_tasklet_submit(struct intel_guc *guc,
1864 struct i915_request *rq)
1868 __i915_request_submit(rq);
1870 trace_i915_request_in(rq, 0);
1872 if (is_multi_lrc_rq(rq)) {
1873 if (multi_lrc_submit(rq)) {
1874 ret = guc_wq_item_append(guc, rq);
1876 ret = guc_add_request(guc, rq);
1879 guc_set_lrc_tail(rq);
1880 ret = guc_add_request(guc, rq);
1883 if (unlikely(ret == -EPIPE))
1884 disable_submission(guc);
1889 static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
1891 struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
1892 struct intel_context *ce = request_to_scheduling_context(rq);
1894 return submission_disabled(guc) || guc->stalled_request ||
1895 !i915_sched_engine_is_empty(sched_engine) ||
1896 !ctx_id_mapped(guc, ce->guc_id.id);
1899 static void guc_submit_request(struct i915_request *rq)
1901 struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
1902 struct intel_guc *guc = &rq->engine->gt->uc.guc;
1903 unsigned long flags;
1905 /* Will be called from irq-context when using foreign fences. */
1906 spin_lock_irqsave(&sched_engine->lock, flags);
1908 if (need_tasklet(guc, rq))
1909 queue_request(sched_engine, rq, rq_prio(rq));
1910 else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
1911 tasklet_hi_schedule(&sched_engine->tasklet);
1913 spin_unlock_irqrestore(&sched_engine->lock, flags);
1916 static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
1920 GEM_BUG_ON(intel_context_is_child(ce));
1922 if (intel_context_is_parent(ce))
1923 ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
1924 NUMBER_MULTI_LRC_GUC_ID(guc),
1925 order_base_2(ce->parallel.number_children
1928 ret = ida_simple_get(&guc->submission_state.guc_ids,
1929 NUMBER_MULTI_LRC_GUC_ID(guc),
1930 guc->submission_state.num_guc_ids,
1931 GFP_KERNEL | __GFP_RETRY_MAYFAIL |
1933 if (unlikely(ret < 0))
1936 ce->guc_id.id = ret;
1940 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
1942 GEM_BUG_ON(intel_context_is_child(ce));
1944 if (!context_guc_id_invalid(ce)) {
1945 if (intel_context_is_parent(ce))
1946 bitmap_release_region(guc->submission_state.guc_ids_bitmap,
1948 order_base_2(ce->parallel.number_children
1951 ida_simple_remove(&guc->submission_state.guc_ids,
1953 clr_ctx_id_mapping(guc, ce->guc_id.id);
1954 set_context_guc_id_invalid(ce);
1956 if (!list_empty(&ce->guc_id.link))
1957 list_del_init(&ce->guc_id.link);
1960 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
1962 unsigned long flags;
1964 spin_lock_irqsave(&guc->submission_state.lock, flags);
1965 __release_guc_id(guc, ce);
1966 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
1969 static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
1971 struct intel_context *cn;
1973 lockdep_assert_held(&guc->submission_state.lock);
1974 GEM_BUG_ON(intel_context_is_child(ce));
1975 GEM_BUG_ON(intel_context_is_parent(ce));
1977 if (!list_empty(&guc->submission_state.guc_id_list)) {
1978 cn = list_first_entry(&guc->submission_state.guc_id_list,
1979 struct intel_context,
1982 GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
1983 GEM_BUG_ON(context_guc_id_invalid(cn));
1984 GEM_BUG_ON(intel_context_is_child(cn));
1985 GEM_BUG_ON(intel_context_is_parent(cn));
1987 list_del_init(&cn->guc_id.link);
1988 ce->guc_id.id = cn->guc_id.id;
1990 spin_lock(&cn->guc_state.lock);
1991 clr_context_registered(cn);
1992 spin_unlock(&cn->guc_state.lock);
1994 set_context_guc_id_invalid(cn);
1996 #ifdef CONFIG_DRM_I915_SELFTEST
1997 guc->number_guc_id_stolen++;
2006 static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
2010 lockdep_assert_held(&guc->submission_state.lock);
2011 GEM_BUG_ON(intel_context_is_child(ce));
2013 ret = new_guc_id(guc, ce);
2014 if (unlikely(ret < 0)) {
2015 if (intel_context_is_parent(ce))
2018 ret = steal_guc_id(guc, ce);
2023 if (intel_context_is_parent(ce)) {
2024 struct intel_context *child;
2027 for_each_child(ce, child)
2028 child->guc_id.id = ce->guc_id.id + i++;
2034 #define PIN_GUC_ID_TRIES 4
2035 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2038 unsigned long flags, tries = PIN_GUC_ID_TRIES;
2040 GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
2043 spin_lock_irqsave(&guc->submission_state.lock, flags);
2045 might_lock(&ce->guc_state.lock);
2047 if (context_guc_id_invalid(ce)) {
2048 ret = assign_guc_id(guc, ce);
2051 ret = 1; /* Indicates newly assigned guc_id */
2053 if (!list_empty(&ce->guc_id.link))
2054 list_del_init(&ce->guc_id.link);
2055 atomic_inc(&ce->guc_id.ref);
2058 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2061 * -EAGAIN indicates no guc_ids are available, so let's retire any
2062 * outstanding requests to see if that frees up a guc_id. If the first
2063 * retire didn't help, insert a sleep with the timeslice duration before
2064 * attempting to retire more requests. Double the sleep period each
2065 * subsequent pass before finally giving up. The sleep period has a
2066 * maximum of 100 ms and a minimum of 1 ms.
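 * For example, with a 5 ms timeslice the first retry only retires
 * requests, the second sleeps ~5 ms and the third ~10 ms (each sleep
 * clamped to the [1, 100] ms range) before we give up.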
2068 if (ret == -EAGAIN && --tries) {
2069 if (PIN_GUC_ID_TRIES - tries > 1) {
2070 unsigned int timeslice_shifted =
2071 ce->engine->props.timeslice_duration_ms <<
2072 (PIN_GUC_ID_TRIES - tries - 2);
2073 unsigned int max = min_t(unsigned int, 100,
2076 msleep(max_t(unsigned int, max, 1));
2078 intel_gt_retire_requests(guc_to_gt(guc));
2085 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2087 unsigned long flags;
2089 GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
2090 GEM_BUG_ON(intel_context_is_child(ce));
2092 if (unlikely(context_guc_id_invalid(ce) ||
2093 intel_context_is_parent(ce)))
2096 spin_lock_irqsave(&guc->submission_state.lock, flags);
2097 if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
2098 !atomic_read(&ce->guc_id.ref))
2099 list_add_tail(&ce->guc_id.link,
2100 &guc->submission_state.guc_id_list);
2101 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2104 static int __guc_action_register_multi_lrc(struct intel_guc *guc,
2105 struct intel_context *ce,
2110 struct intel_context *child;
2111 u32 action[4 + MAX_ENGINE_INSTANCE];
2114 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2116 action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
2117 action[len++] = guc_id;
2118 action[len++] = ce->parallel.number_children + 1;
2119 action[len++] = offset;
2120 for_each_child(ce, child) {
2121 offset += sizeof(struct guc_lrc_desc);
2122 action[len++] = offset;
2125 return guc_submission_send_busy_loop(guc, action, len, 0, loop);
2128 static int __guc_action_register_context(struct intel_guc *guc,
2134 INTEL_GUC_ACTION_REGISTER_CONTEXT,
2139 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2143 static int register_context(struct intel_context *ce, bool loop)
2145 struct intel_guc *guc = ce_to_guc(ce);
2146 u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
2147 ce->guc_id.id * sizeof(struct guc_lrc_desc);
2150 GEM_BUG_ON(intel_context_is_child(ce));
2151 trace_intel_context_register(ce);
2153 if (intel_context_is_parent(ce))
2154 ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id,
2157 ret = __guc_action_register_context(guc, ce->guc_id.id, offset,
2160 unsigned long flags;
2162 spin_lock_irqsave(&ce->guc_state.lock, flags);
2163 set_context_registered(ce);
2164 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2170 static int __guc_action_deregister_context(struct intel_guc *guc,
2174 INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
2178 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2179 G2H_LEN_DW_DEREGISTER_CONTEXT,
2183 static int deregister_context(struct intel_context *ce, u32 guc_id)
2185 struct intel_guc *guc = ce_to_guc(ce);
2187 GEM_BUG_ON(intel_context_is_child(ce));
2188 trace_intel_context_deregister(ce);
2190 return __guc_action_deregister_context(guc, guc_id);
2193 static inline void clear_children_join_go_memory(struct intel_context *ce)
2195 struct parent_scratch *ps = __get_parent_scratch(ce);
2198 ps->go.semaphore = 0;
2199 for (i = 0; i < ce->parallel.number_children + 1; ++i)
2200 ps->join[i].semaphore = 0;
2203 static inline u32 get_children_go_value(struct intel_context *ce)
2205 return __get_parent_scratch(ce)->go.semaphore;
2208 static inline u32 get_children_join_value(struct intel_context *ce,
2211 return __get_parent_scratch(ce)->join[child_index].semaphore;
2214 static void guc_context_policy_init(struct intel_engine_cs *engine,
2215 struct guc_lrc_desc *desc)
2217 desc->policy_flags = 0;
2219 if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
2220 desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE;
2222 /* NB: For both of these, zero means disabled. */
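/*
 * The engine properties are in milliseconds while the GuC policy fields
 * appear to be consumed in microseconds, hence the * 1000 below (e.g. a
 * 10 ms timeslice becomes a 10000 us execution quantum).
 */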
2223 desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
2224 desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
2227 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
2229 struct intel_engine_cs *engine = ce->engine;
2230 struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
2231 struct intel_guc *guc = &engine->gt->uc.guc;
2232 u32 desc_idx = ce->guc_id.id;
2233 struct guc_lrc_desc *desc;
2234 bool context_registered;
2235 intel_wakeref_t wakeref;
2236 struct intel_context *child;
2239 GEM_BUG_ON(!engine->mask);
2240 GEM_BUG_ON(!sched_state_is_init(ce));
2243 * Ensure the LRC and CT vmas are in the same region, as the write
2244 * barrier is done based on the CT vma region.
2246 GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
2247 i915_gem_object_is_lmem(ce->ring->vma->obj));
2249 context_registered = ctx_id_mapped(guc, desc_idx);
2251 clr_ctx_id_mapping(guc, desc_idx);
2252 set_ctx_id_mapping(guc, desc_idx, ce);
2254 desc = __get_lrc_desc(guc, desc_idx);
2255 desc->engine_class = engine_class_to_guc_class(engine->class);
2256 desc->engine_submit_mask = engine->logical_mask;
2257 desc->hw_context_desc = ce->lrc.lrca;
2258 desc->priority = ce->guc_state.prio;
2259 desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2260 guc_context_policy_init(engine, desc);
2263 * If the context is a parent, we need to register a process descriptor
2264 * describing a work queue and register all child contexts.
2266 if (intel_context_is_parent(ce)) {
2267 struct guc_process_desc *pdesc;
2269 ce->parallel.guc.wqi_tail = 0;
2270 ce->parallel.guc.wqi_head = 0;
2272 desc->process_desc = i915_ggtt_offset(ce->state) +
2273 __get_parent_scratch_offset(ce);
2274 desc->wq_addr = i915_ggtt_offset(ce->state) +
2275 __get_wq_offset(ce);
2276 desc->wq_size = WQ_SIZE;
2278 pdesc = __get_process_desc(ce);
2279 memset(pdesc, 0, sizeof(*(pdesc)));
2280 pdesc->stage_id = ce->guc_id.id;
2281 pdesc->wq_base_addr = desc->wq_addr;
2282 pdesc->wq_size_bytes = desc->wq_size;
2283 pdesc->wq_status = WQ_STATUS_ACTIVE;
2285 for_each_child(ce, child) {
2286 desc = __get_lrc_desc(guc, child->guc_id.id);
2288 desc->engine_class =
2289 engine_class_to_guc_class(engine->class);
2290 desc->hw_context_desc = child->lrc.lrca;
2291 desc->priority = ce->guc_state.prio;
2292 desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2293 guc_context_policy_init(engine, desc);
2296 clear_children_join_go_memory(ce);
2300 * The context_lookup xarray is used to determine if the hardware
2301 * context is currently registered. There are two cases in which it
2302 * could be registered: either the guc_id has been stolen from another
2303 * context or the lrc descriptor address of this context has changed. In
2304 * either case the context needs to be deregistered with the GuC before
2305 * registering this context.
2307 if (context_registered) {
2309 unsigned long flags;
2311 trace_intel_context_steal_guc_id(ce);
2314 /* Seal race with Reset */
2315 spin_lock_irqsave(&ce->guc_state.lock, flags);
2316 disabled = submission_disabled(guc);
2317 if (likely(!disabled)) {
2318 set_context_wait_for_deregister_to_register(ce);
2319 intel_context_get(ce);
2321 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2322 if (unlikely(disabled)) {
2323 clr_ctx_id_mapping(guc, desc_idx);
2324 return 0; /* Will get registered later */
2328 * If stealing the guc_id, this ce has the same guc_id as the
2329 * context whose guc_id was stolen.
2331 with_intel_runtime_pm(runtime_pm, wakeref)
2332 ret = deregister_context(ce, ce->guc_id.id);
2333 if (unlikely(ret == -ENODEV))
2334 ret = 0; /* Will get registered later */
2336 with_intel_runtime_pm(runtime_pm, wakeref)
2337 ret = register_context(ce, loop);
2338 if (unlikely(ret == -EBUSY)) {
2339 clr_ctx_id_mapping(guc, desc_idx);
2340 } else if (unlikely(ret == -ENODEV)) {
2341 clr_ctx_id_mapping(guc, desc_idx);
2342 ret = 0; /* Will get registered later */
2349 static int __guc_context_pre_pin(struct intel_context *ce,
2350 struct intel_engine_cs *engine,
2351 struct i915_gem_ww_ctx *ww,
2354 return lrc_pre_pin(ce, engine, ww, vaddr);
2357 static int __guc_context_pin(struct intel_context *ce,
2358 struct intel_engine_cs *engine,
2361 if (i915_ggtt_offset(ce->state) !=
2362 (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
2363 set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
2366 * GuC context gets pinned in guc_request_alloc. See that function for
2367 * explanation of why.
2370 return lrc_pin(ce, engine, vaddr);
2373 static int guc_context_pre_pin(struct intel_context *ce,
2374 struct i915_gem_ww_ctx *ww,
2377 return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
2380 static int guc_context_pin(struct intel_context *ce, void *vaddr)
2382 int ret = __guc_context_pin(ce, ce->engine, vaddr);
2384 if (likely(!ret && !intel_context_is_barrier(ce)))
2385 intel_engine_pm_get(ce->engine);
2390 static void guc_context_unpin(struct intel_context *ce)
2392 struct intel_guc *guc = ce_to_guc(ce);
2394 unpin_guc_id(guc, ce);
2397 if (likely(!intel_context_is_barrier(ce)))
2398 intel_engine_pm_put_async(ce->engine);
2401 static void guc_context_post_unpin(struct intel_context *ce)
2406 static void __guc_context_sched_enable(struct intel_guc *guc,
2407 struct intel_context *ce)
2410 INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2415 trace_intel_context_sched_enable(ce);
2417 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2418 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
2421 static void __guc_context_sched_disable(struct intel_guc *guc,
2422 struct intel_context *ce,
2426 INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2427 guc_id, /* ce->guc_id.id not stable */
2431 GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
2433 GEM_BUG_ON(intel_context_is_child(ce));
2434 trace_intel_context_sched_disable(ce);
2436 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2437 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
2440 static void guc_blocked_fence_complete(struct intel_context *ce)
2442 lockdep_assert_held(&ce->guc_state.lock);
2444 if (!i915_sw_fence_done(&ce->guc_state.blocked))
2445 i915_sw_fence_complete(&ce->guc_state.blocked);
2448 static void guc_blocked_fence_reinit(struct intel_context *ce)
2450 lockdep_assert_held(&ce->guc_state.lock);
2451 GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
2454 * This fence is always complete unless a pending schedule disable is
2455 * outstanding. We arm the fence here and complete it when we receive
2456 * the pending schedule disable complete message.
2458 i915_sw_fence_fini(&ce->guc_state.blocked);
2459 i915_sw_fence_reinit(&ce->guc_state.blocked);
2460 i915_sw_fence_await(&ce->guc_state.blocked);
2461 i915_sw_fence_commit(&ce->guc_state.blocked);
2464 static u16 prep_context_pending_disable(struct intel_context *ce)
2466 lockdep_assert_held(&ce->guc_state.lock);
2468 set_context_pending_disable(ce);
2469 clr_context_enabled(ce);
2470 guc_blocked_fence_reinit(ce);
2471 intel_context_get(ce);
2473 return ce->guc_id.id;
2476 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
2478 struct intel_guc *guc = ce_to_guc(ce);
2479 unsigned long flags;
2480 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
2481 intel_wakeref_t wakeref;
2485 GEM_BUG_ON(intel_context_is_child(ce));
2487 spin_lock_irqsave(&ce->guc_state.lock, flags);
2489 incr_context_blocked(ce);
2491 enabled = context_enabled(ce);
2492 if (unlikely(!enabled || submission_disabled(guc))) {
2494 clr_context_enabled(ce);
2495 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2496 return &ce->guc_state.blocked;
2500 * We add +2 here as the schedule disable complete CTB handler calls
2501 * intel_context_sched_disable_unpin (-2 to pin_count).
2503 atomic_add(2, &ce->pin_count);
2505 guc_id = prep_context_pending_disable(ce);
2507 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2509 with_intel_runtime_pm(runtime_pm, wakeref)
2510 __guc_context_sched_disable(guc, ce, guc_id);
2512 return &ce->guc_state.blocked;
2515 #define SCHED_STATE_MULTI_BLOCKED_MASK \
2516 (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
2517 #define SCHED_STATE_NO_UNBLOCK \
2518 (SCHED_STATE_MULTI_BLOCKED_MASK | \
2519 SCHED_STATE_PENDING_DISABLE | \
2522 static bool context_cant_unblock(struct intel_context *ce)
2524 lockdep_assert_held(&ce->guc_state.lock);
2526 return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
2527 context_guc_id_invalid(ce) ||
2528 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
2529 !intel_context_is_pinned(ce);
2532 static void guc_context_unblock(struct intel_context *ce)
2534 struct intel_guc *guc = ce_to_guc(ce);
2535 unsigned long flags;
2536 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
2537 intel_wakeref_t wakeref;
2540 GEM_BUG_ON(context_enabled(ce));
2541 GEM_BUG_ON(intel_context_is_child(ce));
2543 spin_lock_irqsave(&ce->guc_state.lock, flags);
2545 if (unlikely(submission_disabled(guc) ||
2546 context_cant_unblock(ce))) {
2550 set_context_pending_enable(ce);
2551 set_context_enabled(ce);
2552 intel_context_get(ce);
2555 decr_context_blocked(ce);
2557 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2560 with_intel_runtime_pm(runtime_pm, wakeref)
2561 __guc_context_sched_enable(guc, ce);
2565 static void guc_context_cancel_request(struct intel_context *ce,
2566 struct i915_request *rq)
2568 struct intel_context *block_context =
2569 request_to_scheduling_context(rq);
2571 if (i915_sw_fence_signaled(&rq->submit)) {
2572 struct i915_sw_fence *fence;
2574 intel_context_get(ce);
2575 fence = guc_context_block(block_context);
2576 i915_sw_fence_wait(fence);
2577 if (!i915_request_completed(rq)) {
2578 __i915_request_skip(rq);
2579 guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
2583 guc_context_unblock(block_context);
2584 intel_context_put(ce);
2588 static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
2590 u32 preemption_timeout)
2593 INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
2598 intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
2601 static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
2603 struct intel_guc *guc = ce_to_guc(ce);
2604 struct intel_runtime_pm *runtime_pm =
2605 &ce->engine->gt->i915->runtime_pm;
2606 intel_wakeref_t wakeref;
2607 unsigned long flags;
2609 GEM_BUG_ON(intel_context_is_child(ce));
2611 guc_flush_submissions(guc);
2613 spin_lock_irqsave(&ce->guc_state.lock, flags);
2614 set_context_banned(ce);
2616 if (submission_disabled(guc) ||
2617 (!context_enabled(ce) && !context_pending_disable(ce))) {
2618 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2620 guc_cancel_context_requests(ce);
2621 intel_engine_signal_breadcrumbs(ce->engine);
2622 } else if (!context_pending_disable(ce)) {
2626 * We add +2 here as the schedule disable complete CTB handler
2627 * calls intel_context_sched_disable_unpin (-2 to pin_count).
2629 atomic_add(2, &ce->pin_count);
2631 guc_id = prep_context_pending_disable(ce);
2632 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2635 * In addition to disabling scheduling, set the preemption
2636 * timeout to the minimum value (1 us) so the banned context
2637 * gets kicked off the HW ASAP.
2639 with_intel_runtime_pm(runtime_pm, wakeref) {
2640 __guc_context_set_preemption_timeout(guc, guc_id, 1);
2641 __guc_context_sched_disable(guc, ce, guc_id);
2644 if (!context_guc_id_invalid(ce))
2645 with_intel_runtime_pm(runtime_pm, wakeref)
2646 __guc_context_set_preemption_timeout(guc,
2649 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2653 static void guc_context_sched_disable(struct intel_context *ce)
2655 struct intel_guc *guc = ce_to_guc(ce);
2656 unsigned long flags;
2657 struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
2658 intel_wakeref_t wakeref;
2661 GEM_BUG_ON(intel_context_is_child(ce));
2663 spin_lock_irqsave(&ce->guc_state.lock, flags);
2666 * We have to check if the context has been disabled by another thread,
2667 * check if submission has been disabled to seal a race with reset, and
2668 * finally check if any more requests have been committed to the
2669 * context, ensuring that a request doesn't slip through the
2670 * 'context_pending_disable' fence.
2672 if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
2673 context_has_committed_requests(ce))) {
2674 clr_context_enabled(ce);
2675 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2678 guc_id = prep_context_pending_disable(ce);
2680 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2682 with_intel_runtime_pm(runtime_pm, wakeref)
2683 __guc_context_sched_disable(guc, ce, guc_id);
2687 intel_context_sched_disable_unpin(ce);
2690 static inline void guc_lrc_desc_unpin(struct intel_context *ce)
2692 struct intel_guc *guc = ce_to_guc(ce);
2693 struct intel_gt *gt = guc_to_gt(guc);
2694 unsigned long flags;
2697 GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
2698 GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
2699 GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
2700 GEM_BUG_ON(context_enabled(ce));
2702 /* Seal race with Reset */
2703 spin_lock_irqsave(&ce->guc_state.lock, flags);
2704 disabled = submission_disabled(guc);
2705 if (likely(!disabled)) {
2706 __intel_gt_pm_get(gt);
2707 set_context_destroyed(ce);
2708 clr_context_registered(ce);
2710 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2711 if (unlikely(disabled)) {
2712 release_guc_id(guc, ce);
2713 __guc_context_destroy(ce);
2717 deregister_context(ce, ce->guc_id.id);
2720 static void __guc_context_destroy(struct intel_context *ce)
2722 GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
2723 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
2724 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
2725 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
2726 GEM_BUG_ON(ce->guc_state.number_committed_requests);
2729 intel_context_fini(ce);
2731 if (intel_engine_is_virtual(ce->engine)) {
2732 struct guc_virtual_engine *ve =
2733 container_of(ce, typeof(*ve), context);
2735 if (ve->base.breadcrumbs)
2736 intel_breadcrumbs_put(ve->base.breadcrumbs);
2740 intel_context_free(ce);
2744 static void guc_flush_destroyed_contexts(struct intel_guc *guc)
2746 struct intel_context *ce;
2747 unsigned long flags;
2749 GEM_BUG_ON(!submission_disabled(guc) &&
2750 guc_submission_initialized(guc));
2752 while (!list_empty(&guc->submission_state.destroyed_contexts)) {
2753 spin_lock_irqsave(&guc->submission_state.lock, flags);
2754 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
2755 struct intel_context,
2758 list_del_init(&ce->destroyed_link);
2759 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2764 release_guc_id(guc, ce);
2765 __guc_context_destroy(ce);
2769 static void deregister_destroyed_contexts(struct intel_guc *guc)
2771 struct intel_context *ce;
2772 unsigned long flags;
2774 while (!list_empty(&guc->submission_state.destroyed_contexts)) {
2775 spin_lock_irqsave(&guc->submission_state.lock, flags);
2776 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
2777 struct intel_context,
2780 list_del_init(&ce->destroyed_link);
2781 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2786 guc_lrc_desc_unpin(ce);
2790 static void destroyed_worker_func(struct work_struct *w)
2792 struct intel_guc *guc = container_of(w, struct intel_guc,
2793 submission_state.destroyed_worker);
2794 struct intel_gt *gt = guc_to_gt(guc);
2797 with_intel_gt_pm(gt, tmp)
2798 deregister_destroyed_contexts(guc);
2801 static void guc_context_destroy(struct kref *kref)
2803 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
2804 struct intel_guc *guc = ce_to_guc(ce);
2805 unsigned long flags;
2809 * If the guc_id is invalid this context has been stolen and we can free
2810 * it immediately. It can also be freed immediately if the context is not
2811 * registered with the GuC or the GuC is in the middle of a reset.
2813 spin_lock_irqsave(&guc->submission_state.lock, flags);
2814 destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
2815 !ctx_id_mapped(guc, ce->guc_id.id);
2816 if (likely(!destroy)) {
2817 if (!list_empty(&ce->guc_id.link))
2818 list_del_init(&ce->guc_id.link);
2819 list_add_tail(&ce->destroyed_link,
2820 &guc->submission_state.destroyed_contexts);
2822 __release_guc_id(guc, ce);
2824 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2825 if (unlikely(destroy)) {
2826 __guc_context_destroy(ce);
2831 * We use a worker to issue the H2G to deregister the context as we can
2832 * take the GT PM for the first time, which isn't allowed from an atomic
2835 queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
2838 static int guc_context_alloc(struct intel_context *ce)
2840 return lrc_alloc(ce, ce->engine);
2843 static void guc_context_set_prio(struct intel_guc *guc,
2844 struct intel_context *ce,
2848 INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
2853 GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
2854 prio > GUC_CLIENT_PRIORITY_NORMAL);
2855 lockdep_assert_held(&ce->guc_state.lock);
2857 if (ce->guc_state.prio == prio || submission_disabled(guc) ||
2858 !context_registered(ce)) {
2859 ce->guc_state.prio = prio;
2863 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
2865 ce->guc_state.prio = prio;
2866 trace_intel_context_set_prio(ce);
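/*
 * Map i915 request priorities onto the four GuC client priorities (lower
 * GuC value == higher priority): below-normal i915 priorities map to
 * GUC_CLIENT_PRIORITY_NORMAL, normal to KMD_NORMAL, elevated priorities
 * to HIGH, and display priority and above to KMD_HIGH.
 */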
2869 static inline u8 map_i915_prio_to_guc_prio(int prio)
2871 if (prio == I915_PRIORITY_NORMAL)
2872 return GUC_CLIENT_PRIORITY_KMD_NORMAL;
2873 else if (prio < I915_PRIORITY_NORMAL)
2874 return GUC_CLIENT_PRIORITY_NORMAL;
2875 else if (prio < I915_PRIORITY_DISPLAY)
2876 return GUC_CLIENT_PRIORITY_HIGH;
2878 return GUC_CLIENT_PRIORITY_KMD_HIGH;
2881 static inline void add_context_inflight_prio(struct intel_context *ce,
2884 lockdep_assert_held(&ce->guc_state.lock);
2885 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
2887 ++ce->guc_state.prio_count[guc_prio];
2889 /* Overflow protection */
2890 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
2893 static inline void sub_context_inflight_prio(struct intel_context *ce,
2896 lockdep_assert_held(&ce->guc_state.lock);
2897 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
2899 /* Underflow protection */
2900 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
2902 --ce->guc_state.prio_count[guc_prio];
2905 static inline void update_context_prio(struct intel_context *ce)
2907 struct intel_guc *guc = &ce->engine->gt->uc.guc;
2910 BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
2911 BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
2913 lockdep_assert_held(&ce->guc_state.lock);
2915 for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
2916 if (ce->guc_state.prio_count[i]) {
2917 guc_context_set_prio(guc, ce, i);
2923 static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
2925 /* Lower value is higher priority */
2926 return new_guc_prio < old_guc_prio;
2929 static void add_to_context(struct i915_request *rq)
2931 struct intel_context *ce = request_to_scheduling_context(rq);
2932 u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
2934 GEM_BUG_ON(intel_context_is_child(ce));
2935 GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
2937 spin_lock(&ce->guc_state.lock);
2938 list_move_tail(&rq->sched.link, &ce->guc_state.requests);
2940 if (rq->guc_prio == GUC_PRIO_INIT) {
2941 rq->guc_prio = new_guc_prio;
2942 add_context_inflight_prio(ce, rq->guc_prio);
2943 } else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
2944 sub_context_inflight_prio(ce, rq->guc_prio);
2945 rq->guc_prio = new_guc_prio;
2946 add_context_inflight_prio(ce, rq->guc_prio);
2948 update_context_prio(ce);
2950 spin_unlock(&ce->guc_state.lock);
2953 static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
2955 lockdep_assert_held(&ce->guc_state.lock);
2957 if (rq->guc_prio != GUC_PRIO_INIT &&
2958 rq->guc_prio != GUC_PRIO_FINI) {
2959 sub_context_inflight_prio(ce, rq->guc_prio);
2960 update_context_prio(ce);
2962 rq->guc_prio = GUC_PRIO_FINI;
2965 static void remove_from_context(struct i915_request *rq)
2967 struct intel_context *ce = request_to_scheduling_context(rq);
2969 GEM_BUG_ON(intel_context_is_child(ce));
2971 spin_lock_irq(&ce->guc_state.lock);
2973 list_del_init(&rq->sched.link);
2974 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
2976 /* Prevent further __await_execution() registering a cb, then flush */
2977 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
2979 guc_prio_fini(rq, ce);
2981 decr_context_committed_requests(ce);
2983 spin_unlock_irq(&ce->guc_state.lock);
2985 atomic_dec(&ce->guc_id.ref);
2986 i915_request_notify_execute_cb_imm(rq);
2989 static const struct intel_context_ops guc_context_ops = {
2990 .alloc = guc_context_alloc,
2992 .pre_pin = guc_context_pre_pin,
2993 .pin = guc_context_pin,
2994 .unpin = guc_context_unpin,
2995 .post_unpin = guc_context_post_unpin,
2997 .ban = guc_context_ban,
2999 .cancel_request = guc_context_cancel_request,
3001 .enter = intel_context_enter_engine,
3002 .exit = intel_context_exit_engine,
3004 .sched_disable = guc_context_sched_disable,
3007 .destroy = guc_context_destroy,
3009 .create_virtual = guc_create_virtual,
3010 .create_parallel = guc_create_parallel,
3013 static void submit_work_cb(struct irq_work *wrk)
3015 struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
3017 might_lock(&rq->engine->sched_engine->lock);
3018 i915_sw_fence_complete(&rq->submit);
3021 static void __guc_signal_context_fence(struct intel_context *ce)
3023 struct i915_request *rq, *rn;
3025 lockdep_assert_held(&ce->guc_state.lock);
3027 if (!list_empty(&ce->guc_state.fences))
3028 trace_intel_context_fence_release(ce);
3031 * Use an IRQ to ensure locking order of sched_engine->lock ->
3032 * ce->guc_state.lock is preserved.
3034 list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
3036 list_del(&rq->guc_fence_link);
3037 irq_work_queue(&rq->submit_work);
3040 INIT_LIST_HEAD(&ce->guc_state.fences);
3043 static void guc_signal_context_fence(struct intel_context *ce)
3045 unsigned long flags;
3047 GEM_BUG_ON(intel_context_is_child(ce));
3049 spin_lock_irqsave(&ce->guc_state.lock, flags);
3050 clr_context_wait_for_deregister_to_register(ce);
3051 __guc_signal_context_fence(ce);
3052 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3055 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
3057 return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
3058 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
3059 !submission_disabled(ce_to_guc(ce));
3062 static void guc_context_init(struct intel_context *ce)
3064 const struct i915_gem_context *ctx;
3065 int prio = I915_CONTEXT_DEFAULT_PRIORITY;
3068 ctx = rcu_dereference(ce->gem_context);
3070 prio = ctx->sched.priority;
3073 ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
3074 set_bit(CONTEXT_GUC_INIT, &ce->flags);
3077 static int guc_request_alloc(struct i915_request *rq)
3079 struct intel_context *ce = request_to_scheduling_context(rq);
3080 struct intel_guc *guc = ce_to_guc(ce);
3081 unsigned long flags;
3084 GEM_BUG_ON(!intel_context_is_pinned(rq->context));
3087 * Flush enough space to reduce the likelihood of waiting after
3088 * we start building the request - in which case we will just
3089 * have to repeat work.
3091 rq->reserved_space += GUC_REQUEST_SIZE;
3094 * Note that after this point, we have committed to using
3095 * this request as it is being used to both track the
3096 * state of engine initialisation and liveness of the
3097 * golden renderstate above. Think twice before you try
3098 * to cancel/unwind this request now.
3101 /* Unconditionally invalidate GPU caches and TLBs. */
3102 ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
3106 rq->reserved_space -= GUC_REQUEST_SIZE;
3108 if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
3109 guc_context_init(ce);
3112 * Call pin_guc_id here rather than in the pinning step as with
3113 * dma_resv, contexts can be repeatedly pinned / unpinned, thrashing the
3114 * guc_id and creating horrible race conditions. This is especially bad
3115 * when guc_ids are being stolen due to oversubscription. By the time
3116 * this function is reached, it is guaranteed that the guc_id will be
3117 * persistent until the generated request is retired, thus sealing these
3118 * race conditions. It is still safe to fail here if guc_ids are
3119 * exhausted and return -EAGAIN to the user indicating that they can try
3120 * again in the future.
3122 * There is no need for a lock here as the timeline mutex ensures at
3123 * most one context can be executing this code path at once. The
3124 * guc_id_ref is incremented once for every request in flight and
3125 * decremented on each retire. When it is zero, a lock around the
3126 * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
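 *
 * In other words, the atomic_add_unless() fast path below only takes an
 * extra reference when one is already held (ref != 0); otherwise we fall
 * back to pin_guc_id(), which takes submission_state.lock to assign (or
 * steal) an id.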
3128 if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
3131 ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */
3132 if (unlikely(ret < 0))
3134 if (context_needs_register(ce, !!ret)) {
3135 ret = guc_lrc_desc_pin(ce, true);
3136 if (unlikely(ret)) { /* unwind */
3137 if (ret == -EPIPE) {
3138 disable_submission(guc);
3139 goto out; /* GPU will be reset */
3141 atomic_dec(&ce->guc_id.ref);
3142 unpin_guc_id(guc, ce);
3147 clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
3151 * We block all requests on this context if a G2H is pending for a
3152 * schedule disable or context deregistration as the GuC will fail a
3153 * schedule enable or context registration if either G2H is pending
3154 * respectively. Once a G2H returns, the fence that is blocking these
3155 * requests is released (see guc_signal_context_fence).
3157 spin_lock_irqsave(&ce->guc_state.lock, flags);
3158 if (context_wait_for_deregister_to_register(ce) ||
3159 context_pending_disable(ce)) {
3160 init_irq_work(&rq->submit_work, submit_work_cb);
3161 i915_sw_fence_await(&rq->submit);
3163 list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
3165 incr_context_committed_requests(ce);
3166 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3171 static int guc_virtual_context_pre_pin(struct intel_context *ce,
3172 struct i915_gem_ww_ctx *ww,
3175 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3177 return __guc_context_pre_pin(ce, engine, ww, vaddr);
3180 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
3182 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3183 int ret = __guc_context_pin(ce, engine, vaddr);
3184 intel_engine_mask_t tmp, mask = ce->engine->mask;
3187 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3188 intel_engine_pm_get(engine);
3193 static void guc_virtual_context_unpin(struct intel_context *ce)
3195 intel_engine_mask_t tmp, mask = ce->engine->mask;
3196 struct intel_engine_cs *engine;
3197 struct intel_guc *guc = ce_to_guc(ce);
3199 GEM_BUG_ON(context_enabled(ce));
3200 GEM_BUG_ON(intel_context_is_barrier(ce));
3202 unpin_guc_id(guc, ce);
3205 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3206 intel_engine_pm_put_async(engine);
3209 static void guc_virtual_context_enter(struct intel_context *ce)
3211 intel_engine_mask_t tmp, mask = ce->engine->mask;
3212 struct intel_engine_cs *engine;
3214 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3215 intel_engine_pm_get(engine);
3217 intel_timeline_enter(ce->timeline);
3220 static void guc_virtual_context_exit(struct intel_context *ce)
3222 intel_engine_mask_t tmp, mask = ce->engine->mask;
3223 struct intel_engine_cs *engine;
3225 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3226 intel_engine_pm_put(engine);
3228 intel_timeline_exit(ce->timeline);
3231 static int guc_virtual_context_alloc(struct intel_context *ce)
3233 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3235 return lrc_alloc(ce, engine);
3238 static const struct intel_context_ops virtual_guc_context_ops = {
3239 .alloc = guc_virtual_context_alloc,
3241 .pre_pin = guc_virtual_context_pre_pin,
3242 .pin = guc_virtual_context_pin,
3243 .unpin = guc_virtual_context_unpin,
3244 .post_unpin = guc_context_post_unpin,
3246 .ban = guc_context_ban,
3248 .cancel_request = guc_context_cancel_request,
3250 .enter = guc_virtual_context_enter,
3251 .exit = guc_virtual_context_exit,
3253 .sched_disable = guc_context_sched_disable,
3255 .destroy = guc_context_destroy,
3257 .get_sibling = guc_virtual_get_sibling,
3260 static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
3262 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3263 struct intel_guc *guc = ce_to_guc(ce);
3266 GEM_BUG_ON(!intel_context_is_parent(ce));
3267 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3269 ret = pin_guc_id(guc, ce);
3270 if (unlikely(ret < 0))
3273 return __guc_context_pin(ce, engine, vaddr);
3276 static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
3278 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3280 GEM_BUG_ON(!intel_context_is_child(ce));
3281 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3283 __intel_context_pin(ce->parallel.parent);
3284 return __guc_context_pin(ce, engine, vaddr);
3287 static void guc_parent_context_unpin(struct intel_context *ce)
3289 struct intel_guc *guc = ce_to_guc(ce);
3291 GEM_BUG_ON(context_enabled(ce));
3292 GEM_BUG_ON(intel_context_is_barrier(ce));
3293 GEM_BUG_ON(!intel_context_is_parent(ce));
3294 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3296 unpin_guc_id(guc, ce);
3300 static void guc_child_context_unpin(struct intel_context *ce)
3302 GEM_BUG_ON(context_enabled(ce));
3303 GEM_BUG_ON(intel_context_is_barrier(ce));
3304 GEM_BUG_ON(!intel_context_is_child(ce));
3305 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3310 static void guc_child_context_post_unpin(struct intel_context *ce)
3312 GEM_BUG_ON(!intel_context_is_child(ce));
3313 GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
3314 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3317 intel_context_unpin(ce->parallel.parent);
3320 static void guc_child_context_destroy(struct kref *kref)
3322 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
3324 __guc_context_destroy(ce);
3327 static const struct intel_context_ops virtual_parent_context_ops = {
3328 .alloc = guc_virtual_context_alloc,
3330 .pre_pin = guc_context_pre_pin,
3331 .pin = guc_parent_context_pin,
3332 .unpin = guc_parent_context_unpin,
3333 .post_unpin = guc_context_post_unpin,
3335 .ban = guc_context_ban,
3337 .cancel_request = guc_context_cancel_request,
3339 .enter = guc_virtual_context_enter,
3340 .exit = guc_virtual_context_exit,
3342 .sched_disable = guc_context_sched_disable,
3344 .destroy = guc_context_destroy,
3346 .get_sibling = guc_virtual_get_sibling,
3349 static const struct intel_context_ops virtual_child_context_ops = {
3350 .alloc = guc_virtual_context_alloc,
3352 .pre_pin = guc_context_pre_pin,
3353 .pin = guc_child_context_pin,
3354 .unpin = guc_child_context_unpin,
3355 .post_unpin = guc_child_context_post_unpin,
3357 .cancel_request = guc_context_cancel_request,
3359 .enter = guc_virtual_context_enter,
3360 .exit = guc_virtual_context_exit,
3362 .destroy = guc_child_context_destroy,
3364 .get_sibling = guc_virtual_get_sibling,
3368 * The below override of the breadcrumbs is enabled when the user configures a
3369 * context for parallel submission (multi-lrc, parent-child).
3371 * The overridden breadcrumb code implements an algorithm which allows the GuC to
3372 * safely preempt all the hw contexts configured for parallel submission
3373 * between each BB. The contract between the i915 and the GuC is that if the parent
3374 * context can be preempted, all the children can be preempted, and the GuC will
3375 * always try to preempt the parent before the children. A handshake between the
3376 * parent / children breadcrumbs ensures the i915 holds up its end of the deal by
3377 * creating a window to preempt between each set of BBs.
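 *
 * Roughly, the handshake uses the join/go semaphores in the parent scratch
 * page (see clear_children_join_go_memory() above): each child's fini
 * breadcrumb signals its join semaphore and then waits on the go
 * semaphore, while the parent waits for all the joins before writing the
 * go value, leaving every context at a preemptible semaphore wait between
 * each set of BBs.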
3379 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
3380 u64 offset, u32 len,
3381 const unsigned int flags);
3382 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
3383 u64 offset, u32 len,
3384 const unsigned int flags);
3386 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
3389 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
3392 static struct intel_context *
3393 guc_create_parallel(struct intel_engine_cs **engines,
3394 unsigned int num_siblings,
3397 struct intel_engine_cs **siblings = NULL;
3398 struct intel_context *parent = NULL, *ce, *err;
3401 siblings = kmalloc_array(num_siblings,
3405 return ERR_PTR(-ENOMEM);
3407 for (i = 0; i < width; ++i) {
3408 for (j = 0; j < num_siblings; ++j)
3409 siblings[j] = engines[i * num_siblings + j];
3411 ce = intel_engine_create_virtual(siblings, num_siblings,
3420 parent->ops = &virtual_parent_context_ops;
3422 ce->ops = &virtual_child_context_ops;
3423 intel_context_bind_parent_child(parent, ce);
3427 parent->parallel.fence_context = dma_fence_context_alloc(1);
3429 parent->engine->emit_bb_start =
3430 emit_bb_start_parent_no_preempt_mid_batch;
3431 parent->engine->emit_fini_breadcrumb =
3432 emit_fini_breadcrumb_parent_no_preempt_mid_batch;
3433 parent->engine->emit_fini_breadcrumb_dw =
3434 12 + 4 * parent->parallel.number_children;
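/*
 * e.g. a two-wide (one child) parallel context ends up with a
 * 12 + 4 == 16 dword parent fini breadcrumb, the same budget as the
 * fixed 16 dwords reserved for each child below.
 */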
3435 for_each_child(parent, ce) {
3436 ce->engine->emit_bb_start =
3437 emit_bb_start_child_no_preempt_mid_batch;
3438 ce->engine->emit_fini_breadcrumb =
3439 emit_fini_breadcrumb_child_no_preempt_mid_batch;
3440 ce->engine->emit_fini_breadcrumb_dw = 16;
3448 intel_context_put(parent);
3454 guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
3456 struct intel_engine_cs *sibling;
3457 intel_engine_mask_t tmp, mask = b->engine_mask;
3458 bool result = false;
3460 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
3461 result |= intel_engine_irq_enable(sibling);
3467 guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
3469 struct intel_engine_cs *sibling;
3470 intel_engine_mask_t tmp, mask = b->engine_mask;
3472 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
3473 intel_engine_irq_disable(sibling);
3476 static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
3481 * In GuC submission mode we do not know which physical engine a request
3482 * will be scheduled on, which creates a problem because the breadcrumb
3483 * interrupt is per physical engine. To work around this we attach
3484 * requests and direct all breadcrumb interrupts to the first instance
3485 * of an engine per class. In addition all breadcrumb interrupts are
3486 * enabled / disabled across an engine class in unison.
3488 for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) {
3489 struct intel_engine_cs *sibling =
3490 engine->gt->engine_class[engine->class][i];
3493 if (engine->breadcrumbs != sibling->breadcrumbs) {
3494 intel_breadcrumbs_put(engine->breadcrumbs);
3495 engine->breadcrumbs =
3496 intel_breadcrumbs_get(sibling->breadcrumbs);
3502 if (engine->breadcrumbs) {
3503 engine->breadcrumbs->engine_mask |= engine->mask;
3504 engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs;
3505 engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs;
3509 static void guc_bump_inflight_request_prio(struct i915_request *rq,
3512 struct intel_context *ce = request_to_scheduling_context(rq);
3513 u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
3515 /* Short circuit function */
3516 if (prio < I915_PRIORITY_NORMAL ||
3517 rq->guc_prio == GUC_PRIO_FINI ||
3518 (rq->guc_prio != GUC_PRIO_INIT &&
3519 !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
3522 spin_lock(&ce->guc_state.lock);
3523 if (rq->guc_prio != GUC_PRIO_FINI) {
3524 if (rq->guc_prio != GUC_PRIO_INIT)
3525 sub_context_inflight_prio(ce, rq->guc_prio);
3526 rq->guc_prio = new_guc_prio;
3527 add_context_inflight_prio(ce, rq->guc_prio);
3528 update_context_prio(ce);
3530 spin_unlock(&ce->guc_state.lock);
3533 static void guc_retire_inflight_request_prio(struct i915_request *rq)
3535 struct intel_context *ce = request_to_scheduling_context(rq);
3537 spin_lock(&ce->guc_state.lock);
3538 guc_prio_fini(rq, ce);
3539 spin_unlock(&ce->guc_state.lock);
3542 static void sanitize_hwsp(struct intel_engine_cs *engine)
3544 struct intel_timeline *tl;
3546 list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
3547 intel_timeline_reset_seqno(tl);
3550 static void guc_sanitize(struct intel_engine_cs *engine)
3553 * Poison residual state on resume, in case the suspend didn't!
3555 * We have to assume that across suspend/resume (or other loss
3556 * of control) the contents of our pinned buffers have been
3557 * lost, replaced by garbage. Since this doesn't always happen,
3558 * let's poison such state so that we more quickly spot when
3559 * we falsely assume it has been preserved.
3561 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
3562 memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
3565 * The kernel_context HWSP is stored in the status_page. As above,
3566 * that may be lost on resume/initialisation, and so we need to
3567 * reset the value in the HWSP.
3569 sanitize_hwsp(engine);
3571 /* And scrub the dirty cachelines for the HWSP */
3572 clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
3574 intel_engine_reset_pinned_contexts(engine);
3577 static void setup_hwsp(struct intel_engine_cs *engine)
3579 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
3581 ENGINE_WRITE_FW(engine,
3583 i915_ggtt_offset(engine->status_page.vma));
3586 static void start_engine(struct intel_engine_cs *engine)
3588 ENGINE_WRITE_FW(engine,
3590 _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
3592 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
3593 ENGINE_POSTING_READ(engine, RING_MI_MODE);
3596 static int guc_resume(struct intel_engine_cs *engine)
3598 assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
3600 intel_mocs_init_engine(engine);
3602 intel_breadcrumbs_reset(engine->breadcrumbs);
3605 start_engine(engine);
3607 if (engine->class == RENDER_CLASS)
3608 xehp_enable_ccs_engines(engine);
3613 static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
3615 return !sched_engine->tasklet.callback;
3618 static void guc_set_default_submission(struct intel_engine_cs *engine)
3620 engine->submit_request = guc_submit_request;
3623 static inline void guc_kernel_context_pin(struct intel_guc *guc,
3624 struct intel_context *ce)
3626 if (context_guc_id_invalid(ce))
3627 pin_guc_id(guc, ce);
3628 guc_lrc_desc_pin(ce, true);
3631 static inline void guc_init_lrc_mapping(struct intel_guc *guc)
3633 struct intel_gt *gt = guc_to_gt(guc);
3634 struct intel_engine_cs *engine;
3635 enum intel_engine_id id;
3637 /* make sure all descriptors are clean... */
3638 xa_destroy(&guc->context_lookup);
3641 * Some contexts might have been pinned before we enabled GuC
3642 * submission, so we need to add them to the GuC bookkeeping.
3643 * Also, after a reset of the GuC we want to make sure that the
3644 * information shared with GuC is properly reset. The kernel LRCs are
3645 * not attached to the gem_context, so they need to be added separately.
3647 * Note: we purposefully do not check the return of guc_lrc_desc_pin,
3648 * because that function can only fail if a reset is just starting. This
3649 * is at the end of reset so presumably another reset isn't happening
3650 * and even if it did, this code would be run again.
3653 for_each_engine(engine, gt, id) {
3654 struct intel_context *ce;
3656 list_for_each_entry(ce, &engine->pinned_contexts_list,
3657 pinned_contexts_link)
3658 guc_kernel_context_pin(guc, ce);
3662 static void guc_release(struct intel_engine_cs *engine)
3664 engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
3666 intel_engine_cleanup_common(engine);
3667 lrc_fini_wa_ctx(engine);
3670 static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
3672 struct intel_engine_cs *e;
3673 intel_engine_mask_t tmp, mask = engine->mask;
3675 for_each_engine_masked(e, engine->gt, mask, tmp)
3679 static void guc_default_vfuncs(struct intel_engine_cs *engine)
3681 /* Default vfuncs which can be overridden by each engine. */
3683 engine->resume = guc_resume;
3685 engine->cops = &guc_context_ops;
3686 engine->request_alloc = guc_request_alloc;
3687 engine->add_active_request = add_to_context;
3688 engine->remove_active_request = remove_from_context;
3690 engine->sched_engine->schedule = i915_schedule;
3692 engine->reset.prepare = guc_reset_nop;
3693 engine->reset.rewind = guc_rewind_nop;
3694 engine->reset.cancel = guc_reset_nop;
3695 engine->reset.finish = guc_reset_nop;
3697 engine->emit_flush = gen8_emit_flush_xcs;
3698 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
3699 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
3700 if (GRAPHICS_VER(engine->i915) >= 12) {
3701 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
3702 engine->emit_flush = gen12_emit_flush_xcs;
3704 engine->set_default_submission = guc_set_default_submission;
3705 engine->busyness = guc_engine_busyness;
3707 engine->flags |= I915_ENGINE_SUPPORTS_STATS;
3708 engine->flags |= I915_ENGINE_HAS_PREEMPTION;
3709 engine->flags |= I915_ENGINE_HAS_TIMESLICES;
3712 * TODO: GuC supports timeslicing and semaphores as well, but they're
3713 * handled by the firmware so some minor tweaks are required before
3716 * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
3719 engine->emit_bb_start = gen8_emit_bb_start;
3722 static void rcs_submission_override(struct intel_engine_cs *engine)
3724 switch (GRAPHICS_VER(engine->i915)) {
3726 engine->emit_flush = gen12_emit_flush_rcs;
3727 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
3730 engine->emit_flush = gen11_emit_flush_rcs;
3731 engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
3734 engine->emit_flush = gen8_emit_flush_rcs;
3735 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
3740 static inline void guc_default_irqs(struct intel_engine_cs *engine)
3742 engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
3743 intel_engine_set_irq_handler(engine, cs_irq_handler);
3746 static void guc_sched_engine_destroy(struct kref *kref)
3748 struct i915_sched_engine *sched_engine =
3749 container_of(kref, typeof(*sched_engine), ref);
3750 struct intel_guc *guc = sched_engine->private_data;
3752 guc->sched_engine = NULL;
3753 tasklet_kill(&sched_engine->tasklet); /* flush the callback */
3754 kfree(sched_engine);
3757 int intel_guc_submission_setup(struct intel_engine_cs *engine)
3759 struct drm_i915_private *i915 = engine->i915;
3760 struct intel_guc *guc = &engine->gt->uc.guc;
3763 * The setup relies on several assumptions (e.g. irqs always enabled)
3764 * that are only valid on gen11+
3766 GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
3768 if (!guc->sched_engine) {
3769 guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
3770 if (!guc->sched_engine)
3773 guc->sched_engine->schedule = i915_schedule;
3774 guc->sched_engine->disabled = guc_sched_engine_disabled;
3775 guc->sched_engine->private_data = guc;
3776 guc->sched_engine->destroy = guc_sched_engine_destroy;
3777 guc->sched_engine->bump_inflight_request_prio =
3778 guc_bump_inflight_request_prio;
3779 guc->sched_engine->retire_inflight_request_prio =
3780 guc_retire_inflight_request_prio;
3781 tasklet_setup(&guc->sched_engine->tasklet,
3782 guc_submission_tasklet);
3784 i915_sched_engine_put(engine->sched_engine);
3785 engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
3787 guc_default_vfuncs(engine);
3788 guc_default_irqs(engine);
3789 guc_init_breadcrumbs(engine);
3791 if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
3792 rcs_submission_override(engine);
3794 lrc_init_wa_ctx(engine);
3796 /* Finally, take ownership and responsibility for cleanup! */
3797 engine->sanitize = guc_sanitize;
3798 engine->release = guc_release;
3803 void intel_guc_submission_enable(struct intel_guc *guc)
3805 guc_init_lrc_mapping(guc);
3806 guc_init_engine_stats(guc);
3809 void intel_guc_submission_disable(struct intel_guc *guc)
3811 /* Note: By the time we're here, GuC may have already been reset */
3814 static bool __guc_submission_supported(struct intel_guc *guc)
3816 /* GuC submission is unavailable for pre-Gen11 */
3817 return intel_guc_is_supported(guc) &&
3818 GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
3821 static bool __guc_submission_selected(struct intel_guc *guc)
3823 struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
3825 if (!intel_guc_submission_is_supported(guc))
3828 return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
3831 void intel_guc_submission_init_early(struct intel_guc *guc)
3833 xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
3835 spin_lock_init(&guc->submission_state.lock);
3836 INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
3837 ida_init(&guc->submission_state.guc_ids);
3838 INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
3839 INIT_WORK(&guc->submission_state.destroyed_worker,
3840 destroyed_worker_func);
3841 INIT_WORK(&guc->submission_state.reset_fail_worker,
3842 reset_fail_worker_func);
3844 spin_lock_init(&guc->timestamp.lock);
3845 INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
3847 guc->submission_state.num_guc_ids = GUC_MAX_LRC_DESCRIPTORS;
3848 guc->submission_supported = __guc_submission_supported(guc);
3849 guc->submission_selected = __guc_submission_selected(guc);
3852 static inline struct intel_context *
3853 g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
3855 struct intel_context *ce;
3857 if (unlikely(desc_idx >= GUC_MAX_LRC_DESCRIPTORS)) {
3858 drm_err(&guc_to_gt(guc)->i915->drm,
3859 "Invalid desc_idx %u", desc_idx);
3863 ce = __get_context(guc, desc_idx);
3864 if (unlikely(!ce)) {
3865 drm_err(&guc_to_gt(guc)->i915->drm,
3866 "Context is NULL, desc_idx %u", desc_idx);
3870 if (unlikely(intel_context_is_child(ce))) {
3871 drm_err(&guc_to_gt(guc)->i915->drm,
3872 "Context is child, desc_idx %u", desc_idx);
3879 int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
3883 struct intel_context *ce;
3884 u32 desc_idx = msg[0];
3886 if (unlikely(len < 1)) {
3887 drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
3891 ce = g2h_context_lookup(guc, desc_idx);
3895 trace_intel_context_deregister_done(ce);
3897 #ifdef CONFIG_DRM_I915_SELFTEST
3898 if (unlikely(ce->drop_deregister)) {
3899 ce->drop_deregister = false;
3904 if (context_wait_for_deregister_to_register(ce)) {
3905 struct intel_runtime_pm *runtime_pm =
3906 &ce->engine->gt->i915->runtime_pm;
3907 intel_wakeref_t wakeref;
3910 * Previous owner of this guc_id has been deregistered, now safe to
3911 * register this context.
3913 with_intel_runtime_pm(runtime_pm, wakeref)
3914 register_context(ce, true);
3915 guc_signal_context_fence(ce);
3916 intel_context_put(ce);
3917 } else if (context_destroyed(ce)) {
3918 /* Context has been destroyed */
3919 intel_gt_pm_put_async(guc_to_gt(guc));
3920 release_guc_id(guc, ce);
3921 __guc_context_destroy(ce);
3924 decr_outstanding_submission_g2h(guc);
3929 int intel_guc_sched_done_process_msg(struct intel_guc *guc,
3933 struct intel_context *ce;
3934 unsigned long flags;
3935 u32 desc_idx = msg[0];
3937 if (unlikely(len < 2)) {
3938 drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
3942 ce = g2h_context_lookup(guc, desc_idx);
3946 if (unlikely(context_destroyed(ce) ||
3947 (!context_pending_enable(ce) &&
3948 !context_pending_disable(ce)))) {
3949 drm_err(&guc_to_gt(guc)->i915->drm,
3950 "Bad context sched_state 0x%x, desc_idx %u",
3951 ce->guc_state.sched_state, desc_idx);
3955 trace_intel_context_sched_done(ce);
3957 if (context_pending_enable(ce)) {
3958 #ifdef CONFIG_DRM_I915_SELFTEST
3959 if (unlikely(ce->drop_schedule_enable)) {
3960 ce->drop_schedule_enable = false;
3965 spin_lock_irqsave(&ce->guc_state.lock, flags);
3966 clr_context_pending_enable(ce);
3967 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3968 } else if (context_pending_disable(ce)) {
3971 #ifdef CONFIG_DRM_I915_SELFTEST
3972 if (unlikely(ce->drop_schedule_disable)) {
3973 ce->drop_schedule_disable = false;
3979 * Unpin must be done before __guc_signal_context_fence,
3980 * otherwise a race exists between the requests getting
3981 * submitted + retired before this unpin completes, resulting in
3982 * the pin_count going to zero and the context still being
3985 intel_context_sched_disable_unpin(ce);
3987 spin_lock_irqsave(&ce->guc_state.lock, flags);
3988 banned = context_banned(ce);
3989 clr_context_banned(ce);
3990 clr_context_pending_disable(ce);
3991 __guc_signal_context_fence(ce);
3992 guc_blocked_fence_complete(ce);
3993 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3996 guc_cancel_context_requests(ce);
3997 intel_engine_signal_breadcrumbs(ce->engine);
4001 decr_outstanding_submission_g2h(guc);
4002 intel_context_put(ce);
4007 static void capture_error_state(struct intel_guc *guc,
4008 struct intel_context *ce)
4010 struct intel_gt *gt = guc_to_gt(guc);
4011 struct drm_i915_private *i915 = gt->i915;
4012 struct intel_engine_cs *engine = __context_to_physical_engine(ce);
4013 intel_wakeref_t wakeref;
4015 intel_engine_set_hung_context(engine, ce);
4016 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
4017 i915_capture_error_state(gt, engine->mask);
4018 atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]);
4021 static void guc_context_replay(struct intel_context *ce)
4023 struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
4025 __guc_reset_context(ce, true);
4026 tasklet_hi_schedule(&sched_engine->tasklet);
4029 static void guc_handle_context_reset(struct intel_guc *guc,
4030 struct intel_context *ce)
4032 trace_intel_context_reset(ce);
4034 if (likely(!intel_context_is_banned(ce))) {
4035 capture_error_state(guc, ce);
4036 guc_context_replay(ce);
4038 drm_info(&guc_to_gt(guc)->i915->drm,
4039 "Ignoring context reset notification of banned context 0x%04X on %s",
4040 ce->guc_id.id, ce->engine->name);
4044 int intel_guc_context_reset_process_msg(struct intel_guc *guc,
4045 const u32 *msg, u32 len)
4047 struct intel_context *ce;
4048 unsigned long flags;
4051 if (unlikely(len != 1)) {
4052 drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
4059 * The context lookup uses the xarray, but lookups only require an RCU lock,
4060 * not the full spinlock. So take the lock explicitly and keep it until the
4061 * context has been reference count locked to ensure it can't be destroyed
4062 * asynchronously until the reset is done.
4064 xa_lock_irqsave(&guc->context_lookup, flags);
4065 ce = g2h_context_lookup(guc, desc_idx);
4067 intel_context_get(ce);
4068 xa_unlock_irqrestore(&guc->context_lookup, flags);
4073 guc_handle_context_reset(guc, ce);
4074 intel_context_put(ce);
4079 int intel_guc_error_capture_process_msg(struct intel_guc *guc,
4080 const u32 *msg, u32 len)
4084 if (unlikely(len != 1)) {
4085 drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
4090 drm_info(&guc_to_gt(guc)->i915->drm, "Got error capture: status = %d", status);
4092 /* FIXME: Do something with the capture */
4097 static struct intel_engine_cs *
4098 guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
4100 struct intel_gt *gt = guc_to_gt(guc);
4101 u8 engine_class = guc_class_to_engine_class(guc_class);
4103 /* Class index is checked in class converter */
4104 GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE);
4106 return gt->engine_class[engine_class][instance];
4109 static void reset_fail_worker_func(struct work_struct *w)
4111 struct intel_guc *guc = container_of(w, struct intel_guc,
4112 submission_state.reset_fail_worker);
4113 struct intel_gt *gt = guc_to_gt(guc);
4114 intel_engine_mask_t reset_fail_mask;
4115 unsigned long flags;
4117 spin_lock_irqsave(&guc->submission_state.lock, flags);
4118 reset_fail_mask = guc->submission_state.reset_fail_mask;
4119 guc->submission_state.reset_fail_mask = 0;
4120 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
4122 if (likely(reset_fail_mask))
4123 intel_gt_handle_error(gt, reset_fail_mask,
4125 "GuC failed to reset engine mask=0x%x\n",
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len)
{
	struct intel_engine_cs *engine;
	struct intel_gt *gt = guc_to_gt(guc);
	u8 guc_class, instance;
	u32 reason;
	unsigned long flags;

	if (unlikely(len != 3)) {
		drm_err(&gt->i915->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	guc_class = msg[0];
	instance = msg[1];
	reason = msg[2];

	engine = guc_lookup_engine(guc, guc_class, instance);
	if (unlikely(!engine)) {
		drm_err(&gt->i915->drm,
			"Invalid engine %d:%d", guc_class, instance);
		return -EPROTO;
	}

	/*
	 * This is an unexpected failure of a hardware feature. So, log a real
	 * error message not just the informational that comes with the reset.
	 */
	drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
		guc_class, instance, engine->name, reason);

	spin_lock_irqsave(&guc->submission_state.lock, flags);
	guc->submission_state.reset_fail_mask |= engine->mask;
	spin_unlock_irqrestore(&guc->submission_state.lock, flags);

	/*
	 * A GT reset flushes this worker queue (G2H handler) so we must use
	 * another worker to trigger a GT reset.
	 */
	queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);

	return 0;
}
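/*
 * Walk the context_lookup xarray and mark the first context that still has a
 * request active on @engine as the hung context for error capture.
 */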
void intel_guc_find_hung_context(struct intel_engine_cs *engine)
{
	struct intel_guc *guc = &engine->gt->uc.guc;
	struct intel_context *ce;
	struct i915_request *rq;
	unsigned long index;
	unsigned long flags;

	/* Reset called during driver load? GuC not yet initialised! */
	if (unlikely(!guc_submission_initialized(guc)))
		return;

	xa_lock_irqsave(&guc->context_lookup, flags);
	xa_for_each(&guc->context_lookup, index, ce) {
		if (!kref_get_unless_zero(&ce->ref))
			continue;

		xa_unlock(&guc->context_lookup);

		if (!intel_context_is_pinned(ce))
			goto next;

		if (intel_engine_is_virtual(ce->engine)) {
			if (!(ce->engine->mask & engine->mask))
				goto next;
		} else {
			if (ce->engine != engine)
				goto next;
		}

		list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
			if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
				continue;

			intel_engine_set_hung_context(engine, ce);

			/* Can only cope with one hang at a time... */
			intel_context_put(ce);
			xa_lock(&guc->context_lookup);
			goto done;
		}
next:
		intel_context_put(ce);
		xa_lock(&guc->context_lookup);
	}
done:
	xa_unlock_irqrestore(&guc->context_lookup, flags);
}
void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
				    struct i915_request *hung_rq,
				    struct drm_printer *m)
{
	struct intel_guc *guc = &engine->gt->uc.guc;
	struct intel_context *ce;
	unsigned long index;
	unsigned long flags;

	/* Reset called during driver load? GuC not yet initialised! */
	if (unlikely(!guc_submission_initialized(guc)))
		return;

	xa_lock_irqsave(&guc->context_lookup, flags);
	xa_for_each(&guc->context_lookup, index, ce) {
		if (!kref_get_unless_zero(&ce->ref))
			continue;

		xa_unlock(&guc->context_lookup);

		if (!intel_context_is_pinned(ce))
			goto next;

		if (intel_engine_is_virtual(ce->engine)) {
			if (!(ce->engine->mask & engine->mask))
				goto next;
		} else {
			if (ce->engine != engine)
				goto next;
		}

		spin_lock(&ce->guc_state.lock);
		intel_engine_dump_active_requests(&ce->guc_state.requests,
						  hung_rq, m);
		spin_unlock(&ce->guc_state.lock);

next:
		intel_context_put(ce);
		xa_lock(&guc->context_lookup);
	}
	xa_unlock_irqrestore(&guc->context_lookup, flags);
}
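/* Debug helpers below dump GuC submission state, e.g. for debugfs. */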
void intel_guc_submission_print_info(struct intel_guc *guc,
				     struct drm_printer *p)
{
	struct i915_sched_engine *sched_engine = guc->sched_engine;
	struct rb_node *rb;
	unsigned long flags;

	if (!sched_engine)
		return;

	drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
		   atomic_read(&guc->outstanding_submission_g2h));
	drm_printf(p, "GuC tasklet count: %u\n\n",
		   atomic_read(&sched_engine->tasklet.count));

	spin_lock_irqsave(&sched_engine->lock, flags);
	drm_printf(p, "Requests in GuC submit tasklet:\n");
	for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
		struct i915_priolist *pl = to_priolist(rb);
		struct i915_request *rq;

		priolist_for_each_request(rq, pl)
			drm_printf(p, "guc_id=%u, seqno=%llu\n",
				   rq->context->guc_id.id,
				   rq->fence.seqno);
	}
	spin_unlock_irqrestore(&sched_engine->lock, flags);
	drm_printf(p, "\n");
}
static inline void guc_log_context_priority(struct drm_printer *p,
					    struct intel_context *ce)
{
	int i;

	drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
	drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
	for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
	     i < GUC_CLIENT_PRIORITY_NUM; ++i) {
		drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
			   i, ce->guc_state.prio_count[i]);
	}
	drm_printf(p, "\n");
}
static inline void guc_log_context(struct drm_printer *p,
				   struct intel_context *ce)
{
	drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
	drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
	drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
		   ce->ring->head,
		   ce->lrc_reg_state[CTX_RING_HEAD]);
	drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
		   ce->ring->tail,
		   ce->lrc_reg_state[CTX_RING_TAIL]);
	drm_printf(p, "\t\tContext Pin Count: %u\n",
		   atomic_read(&ce->pin_count));
	drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
		   atomic_read(&ce->guc_id.ref));
	drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
		   ce->guc_state.sched_state);
}
void intel_guc_submission_print_context_info(struct intel_guc *guc,
					     struct drm_printer *p)
{
	struct intel_context *ce;
	unsigned long index;
	unsigned long flags;

	xa_lock_irqsave(&guc->context_lookup, flags);
	xa_for_each(&guc->context_lookup, index, ce) {
		GEM_BUG_ON(intel_context_is_child(ce));

		guc_log_context(p, ce);
		guc_log_context_priority(p, ce);

		if (intel_context_is_parent(ce)) {
			struct guc_process_desc *desc = __get_process_desc(ce);
			struct intel_context *child;

			drm_printf(p, "\t\tNumber children: %u\n",
				   ce->parallel.number_children);
			drm_printf(p, "\t\tWQI Head: %u\n",
				   READ_ONCE(desc->head));
			drm_printf(p, "\t\tWQI Tail: %u\n",
				   READ_ONCE(desc->tail));
			drm_printf(p, "\t\tWQI Status: %u\n\n",
				   READ_ONCE(desc->wq_status));

			if (ce->engine->emit_bb_start ==
			    emit_bb_start_parent_no_preempt_mid_batch) {
				u8 i;

				drm_printf(p, "\t\tChildren Go: %u\n\n",
					   get_children_go_value(ce));
				for (i = 0; i < ce->parallel.number_children; ++i)
					drm_printf(p, "\t\tChildren Join: %u\n",
						   get_children_join_value(ce, i));
			}

			for_each_child(ce, child)
				guc_log_context(p, child);
		}
	}
	xa_unlock_irqrestore(&guc->context_lookup, flags);
}
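/*
 * GGTT addresses of the go/join semaphores in the parent context's scratch
 * page, used for the parent <-> children handshakes below.
 */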
static inline u32 get_children_go_addr(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_parent(ce));

	return i915_ggtt_offset(ce->state) +
	       __get_parent_scratch_offset(ce) +
	       offsetof(struct parent_scratch, go.semaphore);
}

static inline u32 get_children_join_addr(struct intel_context *ce,
					 u8 child_index)
{
	GEM_BUG_ON(!intel_context_is_parent(ce));

	return i915_ggtt_offset(ce->state) +
	       __get_parent_scratch_offset(ce) +
	       offsetof(struct parent_scratch, join[child_index].semaphore);
}
#define PARENT_GO_BB			1
#define PARENT_GO_FINI_BREADCRUMB	0
#define CHILD_GO_BB			1
#define CHILD_GO_FINI_BREADCRUMB	0
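/*
 * BB_START handshake: the parent waits for each child to signal its join
 * semaphore, disables preemption, then writes the go semaphore so that all
 * contexts in the relationship start their batches together.
 */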
static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
						      u64 offset, u32 len,
						      const unsigned int flags)
{
	struct intel_context *ce = rq->context;
	u32 *cs;
	u8 i;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Wait on children */
	for (i = 0; i < ce->parallel.number_children; ++i) {
		*cs++ = (MI_SEMAPHORE_WAIT |
			 MI_SEMAPHORE_GLOBAL_GTT |
			 MI_SEMAPHORE_POLL |
			 MI_SEMAPHORE_SAD_EQ_SDD);
		*cs++ = PARENT_GO_BB;
		*cs++ = get_children_join_addr(ce, i);
		*cs++ = 0;
	}

	/* Turn off preemption */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;

	/* Tell children go */
	cs = gen8_emit_ggtt_write(cs,
				  CHILD_GO_BB,
				  get_children_go_addr(ce),
				  0);

	/* Jump to batch */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}
static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
						     u64 offset, u32 len,
						     const unsigned int flags)
{
	struct intel_context *ce = rq->context;
	struct intel_context *parent = intel_context_to_parent(ce);
	u32 *cs;

	GEM_BUG_ON(!intel_context_is_child(ce));

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Signal parent */
	cs = gen8_emit_ggtt_write(cs,
				  PARENT_GO_BB,
				  get_children_join_addr(parent,
							 ce->parallel.child_index),
				  0);

	/* Wait on parent for go */
	*cs++ = (MI_SEMAPHORE_WAIT |
		 MI_SEMAPHORE_GLOBAL_GTT |
		 MI_SEMAPHORE_POLL |
		 MI_SEMAPHORE_SAD_EQ_SDD);
	*cs++ = CHILD_GO_BB;
	*cs++ = get_children_go_addr(parent);
	*cs++ = 0;

	/* Turn off preemption */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* Jump to batch */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	intel_ring_advance(rq, cs);

	return 0;
}
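/*
 * Fini breadcrumb handshake: mirrors the BB_START handshake above, except
 * that the parent re-enables preemption before telling the children to
 * finish.
 */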
static u32 *
__emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
						   u32 *cs)
{
	struct intel_context *ce = rq->context;
	u8 i;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	/* Wait on children */
	for (i = 0; i < ce->parallel.number_children; ++i) {
		*cs++ = (MI_SEMAPHORE_WAIT |
			 MI_SEMAPHORE_GLOBAL_GTT |
			 MI_SEMAPHORE_POLL |
			 MI_SEMAPHORE_SAD_EQ_SDD);
		*cs++ = PARENT_GO_FINI_BREADCRUMB;
		*cs++ = get_children_join_addr(ce, i);
		*cs++ = 0;
	}

	/* Turn on preemption */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;

	/* Tell children go */
	cs = gen8_emit_ggtt_write(cs,
				  CHILD_GO_FINI_BREADCRUMB,
				  get_children_go_addr(ce),
				  0);

	return cs;
}
/*
 * If this is true, a submission of multi-lrc requests had an error and the
 * requests need to be skipped. The front end (execbuf IOCTL) should've called
 * i915_request_skip which squashes the BB but we still need to emit the fini
 * breadcrumb seqno write. At this point we don't know how many of the
 * requests in the multi-lrc submission were generated so we can't do the
 * handshake between the parent and children (e.g. if 4 requests should be
 * generated but the 2nd hit an error only 1 would be seen by the GuC backend).
 * Simply skip the handshake, but still emit the breadcrumb seqno, if an error
 * has occurred on any of the requests in the submission / relationship.
 */
static inline bool skip_handshake(struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
}
#define NON_SKIP_LEN	6
static u32 *
emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
						 u32 *cs)
{
	struct intel_context *ce = rq->context;
	__maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
	__maybe_unused u32 *start_fini_breadcrumb_cs = cs;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	if (unlikely(skip_handshake(rq))) {
		/*
		 * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch,
		 * the NON_SKIP_LEN comes from the length of the emits below.
		 */
		memset(cs, 0, sizeof(u32) *
		       (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
		cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
	} else {
		cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
	}

	/* Emit fini breadcrumb */
	before_fini_breadcrumb_user_interrupt_cs = cs;
	cs = gen8_emit_ggtt_write(cs,
				  rq->fence.seqno,
				  i915_request_active_timeline(rq)->hwsp_offset,
				  0);

	/* User interrupt */
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	/* Ensure our math for skip + emit is correct */
	GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
		   cs);
	GEM_BUG_ON(start_fini_breadcrumb_cs +
		   ce->engine->emit_fini_breadcrumb_dw != cs);

	rq->tail = intel_ring_offset(rq, cs);

	return cs;
}
static u32 *
__emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
						  u32 *cs)
{
	struct intel_context *ce = rq->context;
	struct intel_context *parent = intel_context_to_parent(ce);

	GEM_BUG_ON(!intel_context_is_child(ce));

	/* Turn on preemption */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;

	/* Signal parent */
	cs = gen8_emit_ggtt_write(cs,
				  PARENT_GO_FINI_BREADCRUMB,
				  get_children_join_addr(parent,
							 ce->parallel.child_index),
				  0);

	/* Wait on parent for go */
	*cs++ = (MI_SEMAPHORE_WAIT |
		 MI_SEMAPHORE_GLOBAL_GTT |
		 MI_SEMAPHORE_POLL |
		 MI_SEMAPHORE_SAD_EQ_SDD);
	*cs++ = CHILD_GO_FINI_BREADCRUMB;
	*cs++ = get_children_go_addr(parent);
	*cs++ = 0;

	return cs;
}
static u32 *
emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
						u32 *cs)
{
	struct intel_context *ce = rq->context;
	__maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
	__maybe_unused u32 *start_fini_breadcrumb_cs = cs;

	GEM_BUG_ON(!intel_context_is_child(ce));

	if (unlikely(skip_handshake(rq))) {
		/*
		 * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch,
		 * the NON_SKIP_LEN comes from the length of the emits below.
		 */
		memset(cs, 0, sizeof(u32) *
		       (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
		cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
	} else {
		cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
	}

	/* Emit fini breadcrumb */
	before_fini_breadcrumb_user_interrupt_cs = cs;
	cs = gen8_emit_ggtt_write(cs,
				  rq->fence.seqno,
				  i915_request_active_timeline(rq)->hwsp_offset,
				  0);

	/* User interrupt */
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	/* Ensure our math for skip + emit is correct */
	GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
		   cs);
	GEM_BUG_ON(start_fini_breadcrumb_cs +
		   ce->engine->emit_fini_breadcrumb_dw != cs);

	rq->tail = intel_ring_offset(rq, cs);

	return cs;
}

#undef NON_SKIP_LEN
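/*
 * Create a GuC-backed virtual engine: the siblings reuse the common GuC
 * sched_engine and the GuC firmware load balances submissions across the
 * physical engines.
 */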
static struct intel_context *
guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
		   unsigned long flags)
{
	struct guc_virtual_engine *ve;
	struct intel_guc *guc;
	unsigned int n;
	int err;

	ve = kzalloc(sizeof(*ve), GFP_KERNEL);
	if (!ve)
		return ERR_PTR(-ENOMEM);

	guc = &siblings[0]->gt->uc.guc;

	ve->base.i915 = siblings[0]->i915;
	ve->base.gt = siblings[0]->gt;
	ve->base.uncore = siblings[0]->uncore;
	ve->base.id = -1;

	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
	ve->base.saturated = ALL_ENGINES;

	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");

	ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);

	ve->base.cops = &virtual_guc_context_ops;
	ve->base.request_alloc = guc_request_alloc;
	ve->base.bump_serial = virtual_guc_bump_serial;

	ve->base.submit_request = guc_submit_request;

	ve->base.flags = I915_ENGINE_IS_VIRTUAL;
	intel_context_init(&ve->context, &ve->base);

	for (n = 0; n < count; n++) {
		struct intel_engine_cs *sibling = siblings[n];

		GEM_BUG_ON(!is_power_of_2(sibling->mask));
		if (sibling->mask & ve->base.mask) {
			DRM_DEBUG("duplicate %s entry in load balancer\n",
				  sibling->name);
			err = -EINVAL;
			goto err_put;
		}

		ve->base.mask |= sibling->mask;
		ve->base.logical_mask |= sibling->logical_mask;

		if (n != 0 && ve->base.class != sibling->class) {
			DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
				  sibling->class, ve->base.class);
			err = -EINVAL;
			goto err_put;
		} else if (n == 0) {
			ve->base.class = sibling->class;
			ve->base.uabi_class = sibling->uabi_class;
			snprintf(ve->base.name, sizeof(ve->base.name),
				 "v%dx%d", ve->base.class, count);
			ve->base.context_size = sibling->context_size;

			ve->base.add_active_request =
				sibling->add_active_request;
			ve->base.remove_active_request =
				sibling->remove_active_request;
			ve->base.emit_bb_start = sibling->emit_bb_start;
			ve->base.emit_flush = sibling->emit_flush;
			ve->base.emit_init_breadcrumb =
				sibling->emit_init_breadcrumb;
			ve->base.emit_fini_breadcrumb =
				sibling->emit_fini_breadcrumb;
			ve->base.emit_fini_breadcrumb_dw =
				sibling->emit_fini_breadcrumb_dw;
			ve->base.breadcrumbs =
				intel_breadcrumbs_get(sibling->breadcrumbs);

			ve->base.flags |= sibling->flags;

			ve->base.props.timeslice_duration_ms =
				sibling->props.timeslice_duration_ms;
			ve->base.props.preempt_timeout_ms =
				sibling->props.preempt_timeout_ms;
		}
	}

	return &ve->context;

err_put:
	intel_context_put(&ve->context);
	return ERR_PTR(err);
}
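/* A virtual engine has a heartbeat as long as any of its siblings does. */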
bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp, mask = ve->mask;

	for_each_engine_masked(engine, ve->gt, mask, tmp)
		if (READ_ONCE(engine->props.heartbeat_interval_ms))
			return true;

	return false;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#include "selftest_guc_multi_lrc.c"
#endif