// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include <linux/circ_buf.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "intel_guc_submission.h"

#include "i915_drv.h"
#include "i915_trace.h"

enum {
	GUC_PREEMPT_NONE = 0,
	GUC_PREEMPT_INPROGRESS,
	GUC_PREEMPT_FINISHED,
};
#define GUC_PREEMPT_BREADCRUMB_DWORDS	0x8
#define GUC_PREEMPT_BREADCRUMB_BYTES	\
	(sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)

/**
 * DOC: GuC-based command submission
 *
 * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
 * firmware is moving to an updated submission interface and we plan to
 * turn submission back on when that lands. The documentation below (and the
 * related code) matches the old submission model and will be updated as part
 * of the upgrade to the new flow.
 *
 * GuC client:
 * An intel_guc_client refers to a submission path through GuC. Currently,
 * there is only one client, which is charged with all submissions to the GuC.
 * This struct is the owner of a doorbell, a process descriptor and a workqueue
 * (all of them inside a single gem object that contains all required pages
 * for these elements).
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors, and shares them with the GuC.
 * Currently, there exists a 1:1 mapping between an intel_guc_client and a
 * guc_stage_desc (via the client's stage_id), so effectively only one
 * gets used. This stage descriptor lets the GuC know about the doorbell,
 * workqueue and process descriptor. Theoretically, it also lets the GuC
 * know about our HW contexts (context ID, etc...), but we actually
 * employ a kind of submission where the GuC uses the LRCA sent via the work
 * item instead (the single guc_stage_desc associated with the execbuf client
 * contains information about the default kernel context only, but this is
 * essentially unused). This is called a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). Firmware writes a success/fail code back to the action register
 * after it processes the request. The kernel driver polls waiting for this
 * update and then proceeds.
 * See intel_guc_send()
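 *
 * For example, a minimal exchange over this channel looks like the sketch
 * below (illustrative only; INTEL_GUC_ACTION_SAMPLE_FORCEWAKE is simply a
 * convenient action with a one-dword payload, see intel_guc_send() and its
 * callers for the real flows):
 *
 *	u32 action[] = { INTEL_GUC_ACTION_SAMPLE_FORCEWAKE, 0 };
 *	int err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 *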
 * Doorbells:
 * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
 * mapped into process space.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs a ring tail pointer
 * and an ELSP context descriptor dword into the Work Item.
 * See guc_add_request()
 */
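
/*
 * For reference, a sketch of the four-dword WQ_TYPE_INORDER work item as
 * built by guc_wq_item_append() below (shift and field names are from the
 * GuC firmware interface definitions):
 *
 *	wqi->header = WQ_TYPE_INORDER |
 *		      (wqi_len << WQ_LEN_SHIFT) |
 *		      (target_engine << WQ_TARGET_SHIFT) |
 *		      WQ_NO_WCFLUSH_WAIT;
 *	wqi->context_desc = lower_32_bits(lrc_desc);
 *	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
 *	wqi->fence_id = rq->fence.seqno;
 */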

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static inline bool is_high_priority(struct intel_guc_client *client)
{
	return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
		client->priority == GUC_CLIENT_PRIORITY_HIGH);
}

static int reserve_doorbell(struct intel_guc_client *client)
{
	unsigned long offset;
	unsigned long end;
	u16 id;

	GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);

	/*
	 * The bitmap tracks which doorbell registers are currently in use.
	 * It is split into two halves; the first half is used for normal
	 * priority contexts, the second half for high-priority ones.
	 */
	offset = 0;
	end = GUC_NUM_DOORBELLS / 2;
	if (is_high_priority(client)) {
		offset = end;
		end += offset;
	}

	id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
	if (id == end)
		return -ENOSPC;

	__set_bit(id, client->guc->doorbell_bitmap);
	client->doorbell_id = id;
	DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
			 client->stage_id, yesno(is_high_priority(client)),
			 id);
	return 0;
}
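
/*
 * Worked example of the split above, with GUC_NUM_DOORBELLS being 256 in
 * the current firmware interface: a normal-priority client is assigned an
 * id from [0..127], a high-priority client one from [128..255].
 */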

static bool has_doorbell(struct intel_guc_client *client)
{
	if (client->doorbell_id == GUC_DOORBELL_INVALID)
		return false;

	return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
}

static void unreserve_doorbell(struct intel_guc_client *client)
{
	GEM_BUG_ON(!has_doorbell(client));

	__clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
	client->doorbell_id = GUC_DOORBELL_INVALID;
}

/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
{
	struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;

	return &base[client->stage_id];
}

/*
 * Initialise, update, or clear doorbell data shared with the GuC
 *
 * These functions modify shared data and so need access to the mapped
 * client object which contains the page being used for the doorbell
 */

static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
{
	struct guc_stage_desc *desc;

	/* Update the GuC's idea of the doorbell ID */
	desc = __get_stage_desc(client);
	desc->db_id = new_id;
}

static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
{
	return client->vaddr + client->doorbell_offset;
}

static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;

	GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
	return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
}

static void __init_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *doorbell;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	doorbell->cookie = 0;
}

static void __fini_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *doorbell;
	u16 db_id = client->doorbell_id;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_DISABLED;

	/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
	 * to go to zero after updating db_status before we call the GuC to
	 * release the doorbell
	 */
	if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
		WARN_ONCE(true, "Doorbell never became invalid after disable\n");
}

static int create_doorbell(struct intel_guc_client *client)
{
	int ret;

	if (WARN_ON(!has_doorbell(client)))
		return -ENODEV; /* internal setup error, should never happen */

	__update_doorbell_desc(client, client->doorbell_id);
	__init_doorbell(client);

	ret = __guc_allocate_doorbell(client->guc, client->stage_id);
	if (ret) {
		__fini_doorbell(client);
		__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
		DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
				 client->stage_id, ret);
		return ret;
	}

	return 0;
}

static int destroy_doorbell(struct intel_guc_client *client)
{
	int ret;

	GEM_BUG_ON(!has_doorbell(client));

	__fini_doorbell(client);
	ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
	if (ret)
		DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
			  client->stage_id, ret);

	__update_doorbell_desc(client, GUC_DOORBELL_INVALID);

	return ret;
}

static unsigned long __select_cacheline(struct intel_guc *guc)
{
	unsigned long offset;

	/* Doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Moving to next cache line to reduce contention */
	guc->db_cacheline += cache_line_size();

	DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
			 offset, guc->db_cacheline, cache_line_size());
	return offset;
}
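
/*
 * Worked example, assuming a 64-byte cache line and 4K pages: successive
 * calls return offsets 0x0, 0x40, 0x80, ... 0xfc0 and then wrap back to
 * 0x0, since only offset_in_page() of the monotonic counter is used.
 */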

static inline struct guc_process_desc *
__get_process_desc(struct intel_guc_client *client)
{
	return client->vaddr + client->proc_desc_offset;
}

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_proc_desc_init(struct intel_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = memset(__get_process_desc(client), 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->stage_id = client->stage_id;
	desc->wq_size_bytes = GUC_WQ_SIZE;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;
}

static void guc_proc_desc_fini(struct intel_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = __get_process_desc(client);
	memset(desc, 0, sizeof(*desc));
}

static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc,
				     PAGE_ALIGN(sizeof(struct guc_stage_desc) *
				     GUC_MAX_STAGE_DESCRIPTORS));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->stage_desc_pool = vma;
	guc->stage_desc_pool_vaddr = vaddr;
	ida_init(&guc->stage_ids);

	return 0;
}

static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
	ida_destroy(&guc->stage_ids);
	i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}

/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * write queue, etc).
 */
static void guc_stage_desc_init(struct intel_guc_client *client)
{
	struct intel_guc *guc = client->guc;
	struct guc_stage_desc *desc;
	u32 gfx_addr;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));

	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
			  GUC_STAGE_DESC_ATTR_KERNEL;
	if (is_high_priority(client))
		desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
	desc->stage_id = client->stage_id;
	desc->priority = client->priority;
	desc->db_id = client->doorbell_id;

	/*
	 * The doorbell, process descriptor, and workqueue are all parts
	 * of the client object, which the GuC will reference via the GGTT
	 */
	gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
	desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
	desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
	desc->process_desc = gfx_addr + client->proc_desc_offset;
	desc->wq_addr = gfx_addr + GUC_DB_SIZE;
	desc->wq_size = GUC_WQ_SIZE;

	desc->desc_private = ptr_to_u64(client);
}
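
/*
 * For reference, the resulting layout of the client object as the GuC sees
 * it through the GGTT (GUC_DB_SIZE is one page; offsets within client->vma):
 *
 *	gfx_addr + 0 .. GUC_DB_SIZE - 1:  doorbell page; the doorbell
 *					  cacheline sits at doorbell_offset,
 *					  the process descriptor in the other
 *					  half of the page (proc_desc_offset)
 *	gfx_addr + GUC_DB_SIZE ..:	  workqueue, GUC_WQ_SIZE bytes
 */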

static void guc_stage_desc_fini(struct intel_guc_client *client)
{
	struct guc_stage_desc *desc;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));
}

/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct intel_guc_client *client,
			       u32 target_engine, u32 context_desc,
			       u32 ring_tail, u32 fence_id)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
	struct guc_process_desc *desc = __get_process_desc(client);
	struct guc_wq_item *wqi;
	u32 wq_off;

	lockdep_assert_held(&client->wq_lock);

	/* For now the workqueue item is 4 DWs and the workqueue buffer is
	 * 2 pages, so a wqi structure can neither cross a page boundary nor
	 * wrap to the beginning. This simplifies the implementation below.
	 *
	 * XXX: if that no longer holds, we need to build the data in a temp
	 * wqi and copy it into the workqueue buffer dw by dw.
	 */
	BUILD_BUG_ON(wqi_size != 16);

	/* We expect the WQ to be active if we're appending items to it */
	GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);

	/* Free space is guaranteed. */
	wq_off = READ_ONCE(desc->tail);
	GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
			      GUC_WQ_SIZE) < wqi_size);
	GEM_BUG_ON(wq_off & (wqi_size - 1));

	/* WQ starts from the page after doorbell / process_desc */
	wqi = client->vaddr + wq_off + GUC_DB_SIZE;

	if (I915_SELFTEST_ONLY(client->use_nop_wqi)) {
		wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
	} else {
		/* Now fill in the 4-word work queue item */
		wqi->header = WQ_TYPE_INORDER |
			      (wqi_len << WQ_LEN_SHIFT) |
			      (target_engine << WQ_TARGET_SHIFT) |
			      WQ_NO_WCFLUSH_WAIT;
		wqi->context_desc = context_desc;
		wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
		GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
		wqi->fence_id = fence_id;
	}

	/* Make the update visible to GuC */
	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
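
/*
 * Worked example of the tail update above, assuming 4K pages (so
 * GUC_WQ_SIZE == 0x2000) and 16-byte items: a tail of 0x1ff0 advances to
 * (0x1ff0 + 0x10) & 0x1fff == 0, i.e. the queue wraps cleanly because both
 * the buffer size and the item size are powers of two.
 */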

static void guc_ring_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *db;
	u32 cookie;

	lockdep_assert_held(&client->wq_lock);

	/* pointer of current doorbell cacheline */
	db = __get_doorbell(client);

	/*
	 * We're not expecting the doorbell cookie to change behind our back,
	 * we also need to treat 0 as a reserved value.
	 */
	cookie = READ_ONCE(db->cookie);
	WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);

	/* XXX: doorbell was lost and need to acquire it again */
	GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}
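
/*
 * The xchg() above implements "increment, skipping 0": cookie + 1 ?: cookie + 2
 * only evaluates to cookie + 2 when cookie + 1 is 0, so the cookie advances
 * ..., 0xfffffffe, 0xffffffff, 1, 2, ... and the reserved value 0 never
 * reappears after __init_doorbell().
 */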

static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
	struct intel_guc_client *client = guc->execbuf_client;
	struct intel_engine_cs *engine = rq->engine;
	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

	guc_wq_item_append(client, engine->guc_id, ctx_desc,
			   ring_tail, rq->fence.seqno);
	guc_ring_doorbell(client);
}

/*
 * When we're doing submissions using the regular execlists backend, writing
 * to ELSP from the CPU side is enough to make sure that writes to ringbuffer
 * pages pinned in the mappable aperture portion of the GGTT are visible to
 * the command streamer. Writes done by the GuC on our behalf do not guarantee
 * such ordering; therefore, to ensure the flush, we're issuing a POSTING READ.
 */
static void flush_ggtt_writes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	if (i915_vma_is_map_and_fenceable(vma))
		intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
}
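
/*
 * Note: any MMIO read through the uncore serves the posting purpose here;
 * the _fw variant of the GUC_STATUS read is presumably used because only
 * the ordering side effect of the read is wanted, so taking forcewake
 * would be needless overhead.
 */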

static void guc_submit(struct intel_engine_cs *engine,
		       struct i915_request **out,
		       struct i915_request **end)
{
	struct intel_guc *guc = &engine->gt->uc.guc;
	struct intel_guc_client *client = guc->execbuf_client;

	spin_lock(&client->wq_lock);

	do {
		struct i915_request *rq = *out++;

		flush_ggtt_writes(rq->ring->vma);
		guc_add_request(guc, rq);
	} while (out != end);

	spin_unlock(&client->wq_lock);
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority | __NO_PREEMPTION;
}

static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
	trace_i915_request_in(rq, idx);

	/*
	 * Currently we are not tracking the rq->context being inflight
	 * (ce->inflight = rq->engine). It is only used by the execlists
	 * backend at the moment, a similar counting strategy would be
	 * required if we generalise the inflight tracking.
	 */

	intel_gt_pm_get(rq->engine->gt);
	return i915_request_get(rq);
}

static void schedule_out(struct i915_request *rq)
{
	trace_i915_request_out(rq);

	intel_gt_pm_put(rq->engine->gt);
	i915_request_put(rq);
}

static void __guc_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **first = execlists->inflight;
	struct i915_request ** const last_port = first + execlists->port_mask;
	struct i915_request *last = first[0];
	struct i915_request **port;
	bool submit = false;
	struct rb_node *rb;

	lockdep_assert_held(&engine->active.lock);

	if (last) {
		if (*++first)
			return;

		last = NULL;
	}

	/*
	 * We write directly into the execlists->inflight queue and don't use
	 * the execlists->pending queue, as we don't have a distinct switch
	 * event.
	 */
	port = first;
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			if (last && rq->hw_context != last->hw_context) {
				if (port == last_port)
					goto done;

				*port = schedule_in(last,
						    port - execlists->inflight);
				port++;
			}

			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			submit = true;
			last = rq;
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}
done:
	execlists->queue_priority_hint =
		rb ? to_priolist(rb)->priority : INT_MIN;
	if (submit) {
		*port = schedule_in(last, port - execlists->inflight);
		*++port = NULL;
		guc_submit(engine, first, port);
	}
	execlists->active = execlists->inflight;
}

static void guc_submission_tasklet(unsigned long data)
{
	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **port, *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	for (port = execlists->inflight; (rq = *port); port++) {
		if (!i915_request_completed(rq))
			break;

		schedule_out(rq);
	}
	if (port != execlists->inflight) {
		int idx = port - execlists->inflight;
		int rem = ARRAY_SIZE(execlists->inflight) - idx;
		memmove(execlists->inflight, port, rem * sizeof(*port));
	}

	__guc_dequeue(engine);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its execlists->tasklet *just* as we are
	 * calling engine->init_hw() and also writing the ELSP.
	 * Turning off the execlists->tasklet until the reset is over
	 * prevents the race.
	 */
	__tasklet_disable_sync_once(&execlists->tasklet);
}

static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
	struct i915_request * const *port, *rq;

	/* Note we are only using the inflight and not the pending queue */

	for (port = execlists->active; (rq = *port); port++)
		schedule_out(rq);
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void guc_reset(struct intel_engine_cs *engine, bool stalled)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	cancel_port_requests(execlists);

	/* Push back any incomplete requests for replay after the reset. */
	rq = execlists_unwind_incomplete_requests(execlists);
	if (!rq)
		goto out_unlock;

	if (!i915_request_started(rq))
		stalled = false;

	__i915_request_reset(rq, stalled);
	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);

out_unlock:
	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_cancel_requests(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->active.lock, flags);

	/* Cancel the requests on the HW and clear the ELSP tracker. */
	cancel_port_requests(execlists);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (!i915_request_signaled(rq))
			dma_fence_set_error(&rq->fence, -EIO);

		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_finish(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	if (__tasklet_enable(&execlists->tasklet))
		/* And kick in case we missed a new request submission. */
		tasklet_hi_schedule(&execlists->tasklet);

	GEM_TRACE("%s: depth->%d\n", engine->name,
		  atomic_read(&execlists->tasklet.count));
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of guc_submit() above.
 */

/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
	bool valid;

	GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);

	valid = __doorbell_valid(guc, db_id);

	if (test_bit(db_id, guc->doorbell_bitmap) == valid)
		return true;

	DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
			 db_id, yesno(valid));

	return false;
}

static bool guc_verify_doorbells(struct intel_guc *guc)
{
	bool doorbells_ok = true;
	u16 db_id;

	for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
		if (!doorbell_ok(guc, db_id))
			doorbells_ok = false;

	return doorbells_ok;
}

/**
 * guc_client_alloc() - Allocate an intel_guc_client
 * @guc:	the intel_guc structure
 * @priority:	one of the four levels of priority: _CRITICAL, _HIGH, _NORMAL
 *		and _LOW. The kernel client that replaces ExecList submission
 *		is created with NORMAL priority. The priority of a client used
 *		for scheduling can be HIGH, while a preemption context can use
 *		CRITICAL.
 *
 * Return:	An intel_guc_client object on success, else an ERR_PTR.
 */
static struct intel_guc_client *
guc_client_alloc(struct intel_guc *guc, u32 priority)
{
	struct intel_guc_client *client;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->guc = guc;
	client->priority = priority;
	client->doorbell_id = GUC_DOORBELL_INVALID;
	spin_lock_init(&client->wq_lock);

	ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
			     GFP_KERNEL);
	if (ret < 0)
		goto err_client;

	client->stage_id = ret;

	/* The first page is doorbell/proc_desc. The two following pages are wq. */
	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_id;
	}

	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
	client->vma = vma;

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_vma;
	}
	client->vaddr = vaddr;

	ret = reserve_doorbell(client);
	if (ret)
		goto err_vaddr;

	client->doorbell_offset = __select_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n",
			 priority, client, client->stage_id);
	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
			 client->doorbell_id, client->doorbell_offset);

	return client;

err_vaddr:
	i915_gem_object_unpin_map(client->vma->obj);
err_vma:
	i915_vma_unpin_and_release(&client->vma, 0);
err_id:
	ida_simple_remove(&guc->stage_ids, client->stage_id);
err_client:
	kfree(client);
	return ERR_PTR(ret);
}

static void guc_client_free(struct intel_guc_client *client)
{
	unreserve_doorbell(client);
	i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
	ida_simple_remove(&client->guc->stage_ids, client->stage_id);
	kfree(client);
}

static inline bool ctx_save_restore_disabled(struct intel_context *ce)
{
	u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];

#define SR_DISABLED \
	_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
			   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)

	return (sr & SR_DISABLED) == SR_DISABLED;

#undef SR_DISABLED
}
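
/*
 * CTX_CONTEXT_CONTROL is a masked register: bits 31:16 of a write select
 * which of bits 15:0 take effect, and _MASKED_BIT_ENABLE(x) expands to
 * (x << 16) | x. SR_DISABLED therefore only matches when both inhibit bits
 * are set together with their mask bits in the saved register image.
 */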

static int guc_clients_create(struct intel_guc *guc)
{
	struct intel_guc_client *client;

	GEM_BUG_ON(guc->execbuf_client);

	client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL);
	if (IS_ERR(client)) {
		DRM_ERROR("Failed to create GuC client for submission!\n");
		return PTR_ERR(client);
	}
	guc->execbuf_client = client;

	return 0;
}

static void guc_clients_destroy(struct intel_guc *guc)
{
	struct intel_guc_client *client;

	client = fetch_and_zero(&guc->execbuf_client);
	if (client)
		guc_client_free(client);
}

static int __guc_client_enable(struct intel_guc_client *client)
{
	int ret;

	guc_proc_desc_init(client);
	guc_stage_desc_init(client);

	ret = create_doorbell(client);
	if (ret)
		goto fail;

	return 0;

fail:
	guc_stage_desc_fini(client);
	guc_proc_desc_fini(client);

	return ret;
}

static void __guc_client_disable(struct intel_guc_client *client)
{
	/*
	 * By the time we're here, GuC may have already been reset. If that is
	 * the case, instead of trying (in vain) to communicate with it, let's
	 * just cleanup the doorbell HW and our internal state.
	 */
	if (intel_guc_is_running(client->guc))
		destroy_doorbell(client);
	else
		__fini_doorbell(client);

	guc_stage_desc_fini(client);
	guc_proc_desc_fini(client);
}

static int guc_clients_enable(struct intel_guc *guc)
{
	return __guc_client_enable(guc->execbuf_client);
}

static void guc_clients_disable(struct intel_guc *guc)
{
	if (guc->execbuf_client)
		__guc_client_disable(guc->execbuf_client);
}

/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{
	int ret;

	if (guc->stage_desc_pool)
		return 0;

	ret = guc_stage_desc_pool_create(guc);
	if (ret)
		return ret;
	/*
	 * Keep static analysers happy, let them know that we allocated the
	 * vma after testing that it didn't exist earlier.
	 */
	GEM_BUG_ON(!guc->stage_desc_pool);

	WARN_ON(!guc_verify_doorbells(guc));
	ret = guc_clients_create(guc);
	if (ret)
		goto err_pool;

	return 0;

err_pool:
	guc_stage_desc_pool_destroy(guc);
	return ret;
}

void intel_guc_submission_fini(struct intel_guc *guc)
{
	guc_clients_destroy(guc);
	WARN_ON(!guc_verify_doorbells(guc));

	if (guc->stage_desc_pool)
		guc_stage_desc_pool_destroy(guc);
}

static void guc_interrupts_capture(struct intel_gt *gt)
{
	struct intel_rps *rps = &gt->i915->gt_pm.rps;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/* tell all command streamers to forward interrupts (but not vblank)
	 * to GuC
	 */
	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, gt, id)
		ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs);
	intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs);
	intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs);

	/*
	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
	 * (unmasked) PM interrupts to the GuC. All other bits of this
	 * register *disable* generation of a specific interrupt.
	 *
	 * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
	 * writing to the PM interrupt mask register, i.e. interrupts
	 * that must not be disabled.
	 *
	 * If the GuC is handling these interrupts, then we must not let
	 * the PM code disable ANY interrupt that the GuC is expecting.
	 * So for each ENABLED (0) bit in this register, we must SET the
	 * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
	 * GuC needs the ARAT expired interrupt unmasked, hence it is set
	 * in pm_intrmsk_mbz.
	 *
	 * Here we CLEAR the REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which
	 * will result in the register bit being left SET!
	 */
	rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
	rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
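
/*
 * Net effect of the two pm_intrmsk_mbz updates above on subsequent
 * PMINTRMSK writes: the ARAT-expired interrupt is kept unmasked for the
 * GuC, while REDIRECT_TO_GUC is no longer forced clear and so remains set,
 * steering the (unmasked) PM interrupts to the GuC.
 */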

static void guc_interrupts_release(struct intel_gt *gt)
{
	struct intel_rps *rps = &gt->i915->gt_pm.rps;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/*
	 * tell all command streamers NOT to forward interrupts or vblank
	 * to GuC
	 */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, gt, id)
		ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);

	/* route all GT interrupts to the host */
	intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0);
	intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0);
	intel_uncore_write(uncore, GUC_WD_VECS_IER, 0);

	rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
	rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}

static void guc_set_default_submission(struct intel_engine_cs *engine)
{
	/*
	 * We inherit a bunch of functions from execlists that we'd like
	 * to keep using:
	 *
	 *    engine->submit_request = execlists_submit_request;
	 *    engine->cancel_requests = execlists_cancel_requests;
	 *    engine->schedule = execlists_schedule;
	 *
	 * But we need to override the actual submission backend in order
	 * to talk to the GuC.
	 */
	intel_execlists_set_default_submission(engine);

	engine->execlists.tasklet.func = guc_submission_tasklet;

	/* do not use execlists park/unpark */
	engine->park = engine->unpark = NULL;

	engine->reset.prepare = guc_reset_prepare;
	engine->reset.reset = guc_reset;
	engine->reset.finish = guc_reset_finish;

	engine->cancel_requests = guc_cancel_requests;

	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
	engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;

	/*
	 * For the breadcrumb irq to work we need the interrupts to stay
	 * enabled. However, on all platforms on which we'll have support for
	 * GuC submission we don't allow disabling the interrupts at runtime, so
	 * we're always safe with the current flow.
	 */
	GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}

int intel_guc_submission_enable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	err = i915_inject_load_error(gt->i915, -ENXIO);
	if (err)
		return err;

	/*
	 * We're using GuC work items for submitting work through GuC. Since
	 * we're coalescing multiple requests from a single context into a
	 * single work item prior to assigning it to execlist_port, we can
	 * never have more work items than the total number of ports (for all
	 * engines). The GuC firmware is controlling the HEAD of work queue,
	 * and it is guaranteed that it will remove the work item from the
	 * queue before our request is completed.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
		     sizeof(struct guc_wq_item) *
		     I915_NUM_ENGINES > GUC_WQ_SIZE);

	GEM_BUG_ON(!guc->execbuf_client);

	err = guc_clients_enable(guc);
	if (err)
		return err;

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(gt);

	for_each_engine(engine, gt, id) {
		engine->set_default_submission = guc_set_default_submission;
		engine->set_default_submission(engine);
	}

	return 0;
}

void intel_guc_submission_disable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	GEM_BUG_ON(gt->awake); /* GT should be parked first */

	guc_interrupts_release(gt);
	guc_clients_disable(guc);
}

static bool __guc_submission_support(struct intel_guc *guc)
{
	/* XXX: GuC submission is unavailable for now */
	return false;

	if (!intel_guc_is_supported(guc))
		return false;

	return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
}

void intel_guc_submission_init_early(struct intel_guc *guc)
{
	guc->submission_supported = __guc_submission_support(guc);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#endif