1 // SPDX-License-Identifier: MIT
3 * Copyright © 2016 Intel Corporation
6 #include <linux/string_helpers.h>
8 #include <drm/drm_print.h>
10 #include "gem/i915_gem_context.h"
11 #include "gem/i915_gem_internal.h"
12 #include "gt/intel_gt_print.h"
13 #include "gt/intel_gt_regs.h"
15 #include "i915_cmd_parser.h"
19 #include "intel_breadcrumbs.h"
20 #include "intel_context.h"
21 #include "intel_engine.h"
22 #include "intel_engine_pm.h"
23 #include "intel_engine_regs.h"
24 #include "intel_engine_user.h"
25 #include "intel_execlists_submission.h"
27 #include "intel_gt_mcr.h"
28 #include "intel_gt_pm.h"
29 #include "intel_gt_requests.h"
30 #include "intel_lrc.h"
31 #include "intel_lrc_reg.h"
32 #include "intel_reset.h"
33 #include "intel_ring.h"
34 #include "uc/intel_guc_submission.h"
36 /* Haswell does have the CXT_SIZE register however it does not appear to be
37 * valid. Now, docs explain in dwords what is in the context object. The full
38 * size is 70720 bytes, however, the power context and execlist context will
39 * never be saved (power context is stored elsewhere, and execlists don't work
40 * on HSW) - so the final size, including the extra state required for the
41 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
43 #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
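/* 66944 bytes / 4096 bytes per page = 16.34, hence the round up to 17 pages. */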
45 #define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
46 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
47 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
48 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
50 #define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
52 #define MAX_MMIO_BASES 3
56 /* mmio bases table *must* be sorted in reverse graphics_ver order */
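/* __engine_mmio_base() returns the first entry whose graphics_ver is <= the running platform's version, hence the descending sort requirement. */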
57 struct engine_mmio_base {
60 } mmio_bases[MAX_MMIO_BASES];
63 static const struct engine_info intel_engines[] = {
65 .class = RENDER_CLASS,
68 { .graphics_ver = 1, .base = RENDER_RING_BASE }
72 .class = COPY_ENGINE_CLASS,
75 { .graphics_ver = 6, .base = BLT_RING_BASE }
79 .class = COPY_ENGINE_CLASS,
82 { .graphics_ver = 12, .base = XEHPC_BCS1_RING_BASE }
86 .class = COPY_ENGINE_CLASS,
89 { .graphics_ver = 12, .base = XEHPC_BCS2_RING_BASE }
93 .class = COPY_ENGINE_CLASS,
96 { .graphics_ver = 12, .base = XEHPC_BCS3_RING_BASE }
100 .class = COPY_ENGINE_CLASS,
103 { .graphics_ver = 12, .base = XEHPC_BCS4_RING_BASE }
107 .class = COPY_ENGINE_CLASS,
110 { .graphics_ver = 12, .base = XEHPC_BCS5_RING_BASE }
114 .class = COPY_ENGINE_CLASS,
117 { .graphics_ver = 12, .base = XEHPC_BCS6_RING_BASE }
121 .class = COPY_ENGINE_CLASS,
124 { .graphics_ver = 12, .base = XEHPC_BCS7_RING_BASE }
128 .class = COPY_ENGINE_CLASS,
131 { .graphics_ver = 12, .base = XEHPC_BCS8_RING_BASE }
135 .class = VIDEO_DECODE_CLASS,
138 { .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
139 { .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
140 { .graphics_ver = 4, .base = BSD_RING_BASE }
144 .class = VIDEO_DECODE_CLASS,
147 { .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
148 { .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
152 .class = VIDEO_DECODE_CLASS,
155 { .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
159 .class = VIDEO_DECODE_CLASS,
162 { .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
166 .class = VIDEO_DECODE_CLASS,
169 { .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE }
173 .class = VIDEO_DECODE_CLASS,
176 { .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE }
180 .class = VIDEO_DECODE_CLASS,
183 { .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE }
187 .class = VIDEO_DECODE_CLASS,
190 { .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE }
194 .class = VIDEO_ENHANCEMENT_CLASS,
197 { .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
198 { .graphics_ver = 7, .base = VEBOX_RING_BASE }
202 .class = VIDEO_ENHANCEMENT_CLASS,
205 { .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
209 .class = VIDEO_ENHANCEMENT_CLASS,
212 { .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE }
216 .class = VIDEO_ENHANCEMENT_CLASS,
219 { .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE }
223 .class = COMPUTE_CLASS,
226 { .graphics_ver = 12, .base = GEN12_COMPUTE0_RING_BASE }
230 .class = COMPUTE_CLASS,
233 { .graphics_ver = 12, .base = GEN12_COMPUTE1_RING_BASE }
237 .class = COMPUTE_CLASS,
240 { .graphics_ver = 12, .base = GEN12_COMPUTE2_RING_BASE }
244 .class = COMPUTE_CLASS,
247 { .graphics_ver = 12, .base = GEN12_COMPUTE3_RING_BASE }
251 .class = OTHER_CLASS,
252 .instance = OTHER_GSC_INSTANCE,
254 { .graphics_ver = 12, .base = MTL_GSC_RING_BASE }
260 * intel_engine_context_size() - return the size of the context for an engine
262 * @class: engine class
264 * Each engine class may require a different amount of space for a context
267 * Return: size (in bytes) of an engine class specific context image
269 * Note: this size includes the HWSP, which is part of the context image
270 * in LRC mode, but does not include the "shared data page" used with
271 * GuC submission. The caller should account for this if using the GuC.
273 u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
275 struct intel_uncore *uncore = gt->uncore;
278 BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
284 switch (GRAPHICS_VER(gt->i915)) {
286 MISSING_CASE(GRAPHICS_VER(gt->i915));
287 return DEFAULT_LR_CONTEXT_RENDER_SIZE;
290 return GEN11_LR_CONTEXT_RENDER_SIZE;
292 return GEN9_LR_CONTEXT_RENDER_SIZE;
294 return GEN8_LR_CONTEXT_RENDER_SIZE;
296 if (IS_HASWELL(gt->i915))
297 return HSW_CXT_TOTAL_SIZE;
299 cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
300 return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
303 cxt_size = intel_uncore_read(uncore, CXT_SIZE);
304 return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
309 * There is a discrepancy here between the size reported
310 * by the register and the size of the context layout
311 * in the docs. Both are described as authoritative!
313 * The discrepancy is on the order of a few cachelines,
314 * but the total is under one page (4k), which is our
315 * minimum allocation anyway so it should all come
318 cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
319 drm_dbg(&gt->i915->drm,
320 "graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
321 GRAPHICS_VER(gt->i915), cxt_size * 64,
323 return round_up(cxt_size * 64, PAGE_SIZE);
326 /* For the special day when i810 gets merged. */
334 case VIDEO_DECODE_CLASS:
335 case VIDEO_ENHANCEMENT_CLASS:
336 case COPY_ENGINE_CLASS:
338 if (GRAPHICS_VER(gt->i915) < 8)
340 return GEN8_LR_CONTEXT_OTHER_SIZE;
344 static u32 __engine_mmio_base(struct drm_i915_private *i915,
345 const struct engine_mmio_base *bases)
349 for (i = 0; i < MAX_MMIO_BASES; i++)
350 if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
353 GEM_BUG_ON(i == MAX_MMIO_BASES);
354 GEM_BUG_ON(!bases[i].base);
356 return bases[i].base;
359 static void __sprint_engine_name(struct intel_engine_cs *engine)
362 * Before we know what the uABI name for this engine will be,
363 * we still would like to keep track of this engine in the debug logs.
364 * We throw in a ' here as a reminder that this isn't its final name.
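* e.g. a vcs engine, instance 2, shows up in the logs as "vcs'2" until then.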
366 GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
367 intel_engine_class_repr(engine->class),
368 engine->instance) >= sizeof(engine->name));
371 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
374 * Though they added more rings on g4x/ilk, they did not add
375 * per-engine HWSTAM until gen6.
377 if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
380 if (GRAPHICS_VER(engine->i915) >= 3)
381 ENGINE_WRITE(engine, RING_HWSTAM, mask);
383 ENGINE_WRITE16(engine, RING_HWSTAM, mask);
386 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
388 /* Mask off all writes into the unknown HWSP */
389 intel_engine_set_hwsp_writemask(engine, ~0u);
392 static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
394 GEM_DEBUG_WARN_ON(iir);
397 static u32 get_reset_domain(u8 ver, enum intel_engine_id id)
402 static const u32 engine_reset_domains[] = {
403 [RCS0] = GEN11_GRDOM_RENDER,
404 [BCS0] = GEN11_GRDOM_BLT,
405 [BCS1] = XEHPC_GRDOM_BLT1,
406 [BCS2] = XEHPC_GRDOM_BLT2,
407 [BCS3] = XEHPC_GRDOM_BLT3,
408 [BCS4] = XEHPC_GRDOM_BLT4,
409 [BCS5] = XEHPC_GRDOM_BLT5,
410 [BCS6] = XEHPC_GRDOM_BLT6,
411 [BCS7] = XEHPC_GRDOM_BLT7,
412 [BCS8] = XEHPC_GRDOM_BLT8,
413 [VCS0] = GEN11_GRDOM_MEDIA,
414 [VCS1] = GEN11_GRDOM_MEDIA2,
415 [VCS2] = GEN11_GRDOM_MEDIA3,
416 [VCS3] = GEN11_GRDOM_MEDIA4,
417 [VCS4] = GEN11_GRDOM_MEDIA5,
418 [VCS5] = GEN11_GRDOM_MEDIA6,
419 [VCS6] = GEN11_GRDOM_MEDIA7,
420 [VCS7] = GEN11_GRDOM_MEDIA8,
421 [VECS0] = GEN11_GRDOM_VECS,
422 [VECS1] = GEN11_GRDOM_VECS2,
423 [VECS2] = GEN11_GRDOM_VECS3,
424 [VECS3] = GEN11_GRDOM_VECS4,
425 [CCS0] = GEN11_GRDOM_RENDER,
426 [CCS1] = GEN11_GRDOM_RENDER,
427 [CCS2] = GEN11_GRDOM_RENDER,
428 [CCS3] = GEN11_GRDOM_RENDER,
429 [GSC0] = GEN12_GRDOM_GSC,
431 GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
432 !engine_reset_domains[id]);
433 reset_domain = engine_reset_domains[id];
435 static const u32 engine_reset_domains[] = {
436 [RCS0] = GEN6_GRDOM_RENDER,
437 [BCS0] = GEN6_GRDOM_BLT,
438 [VCS0] = GEN6_GRDOM_MEDIA,
439 [VCS1] = GEN8_GRDOM_MEDIA2,
440 [VECS0] = GEN6_GRDOM_VECS,
442 GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
443 !engine_reset_domains[id]);
444 reset_domain = engine_reset_domains[id];
450 static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
453 const struct engine_info *info = &intel_engines[id];
454 struct drm_i915_private *i915 = gt->i915;
455 struct intel_engine_cs *engine;
458 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
459 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
460 BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1));
461 BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1));
463 if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
466 if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
469 if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
472 if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
475 engine = kzalloc(sizeof(*engine), GFP_KERNEL);
479 BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
481 INIT_LIST_HEAD(&engine->pinned_contexts_list);
483 engine->legacy_idx = INVALID_ENGINE;
484 engine->mask = BIT(id);
485 engine->reset_domain = get_reset_domain(GRAPHICS_VER(gt->i915),
489 engine->uncore = gt->uncore;
490 guc_class = engine_class_to_guc_class(info->class);
491 engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
492 engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
494 engine->irq_handler = nop_irq_handler;
496 engine->class = info->class;
497 engine->instance = info->instance;
498 engine->logical_mask = BIT(logical_instance);
499 __sprint_engine_name(engine);
501 if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
502 __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
503 engine->class == RENDER_CLASS)
504 engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
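/* i.e. the render engine when present, otherwise the lowest numbered compute engine, is tagged as the first render/compute engine. */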
506 /* features common between engines sharing EUs */
507 if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
508 engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
509 engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
512 engine->props.heartbeat_interval_ms =
513 CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
514 engine->props.max_busywait_duration_ns =
515 CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
516 engine->props.preempt_timeout_ms =
517 CONFIG_DRM_I915_PREEMPT_TIMEOUT;
518 engine->props.stop_timeout_ms =
519 CONFIG_DRM_I915_STOP_TIMEOUT;
520 engine->props.timeslice_duration_ms =
521 CONFIG_DRM_I915_TIMESLICE_DURATION;
524 * Mid-thread pre-emption is not available in Gen12. Unfortunately,
525 * some compute workloads run quite long threads. That means they get
526 * reset due to not pre-empting in a timely manner. So, bump the
527 * pre-emption timeout value to be much higher for compute engines.
529 if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
530 engine->props.preempt_timeout_ms = CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE;
532 /* Cap properties according to any system limits */
533 #define CLAMP_PROP(field) \
535 u64 clamp = intel_clamp_##field(engine, engine->props.field); \
536 if (clamp != engine->props.field) { \
537 drm_notice(&engine->i915->drm, \
538 "Warning, clamping %s to %lld to prevent overflow\n", \
540 engine->props.field = clamp; \
544 CLAMP_PROP(heartbeat_interval_ms);
545 CLAMP_PROP(max_busywait_duration_ns);
546 CLAMP_PROP(preempt_timeout_ms);
547 CLAMP_PROP(stop_timeout_ms);
548 CLAMP_PROP(timeslice_duration_ms);
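/* Each property above is clamped by its matching intel_clamp_<field>() helper; a notice is printed whenever a configured value has to be reduced. */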
552 engine->defaults = engine->props; /* never to change again */
554 engine->context_size = intel_engine_context_size(gt, engine->class);
555 if (WARN_ON(engine->context_size > BIT(20)))
556 engine->context_size = 0;
557 if (engine->context_size)
558 DRIVER_CAPS(i915)->has_logical_contexts = true;
560 ewma__engine_latency_init(&engine->latency);
561 seqcount_init(&engine->stats.execlists.lock);
563 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
565 /* Scrub mmio state on takeover */
566 intel_engine_sanitize_mmio(engine);
568 gt->engine_class[info->class][info->instance] = engine;
569 gt->engine[id] = engine;
574 u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value)
576 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
581 u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value)
583 value = min(value, jiffies_to_nsecs(2));
588 u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
591 * NB: The GuC API only supports 32bit values. However, the limit is further
592 * reduced due to internal calculations which would otherwise overflow.
594 if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
595 value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
597 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
602 u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value)
604 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
609 u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
612 * NB: The GuC API only supports 32bit values. However, the limit is further
613 * reduced due to internal calculations which would otherwise overflow.
615 if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
616 value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
618 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
623 static void __setup_engine_capabilities(struct intel_engine_cs *engine)
625 struct drm_i915_private *i915 = engine->i915;
627 if (engine->class == VIDEO_DECODE_CLASS) {
629 * HEVC support is present on the first engine instance
630 * before Gen11 and on all instances afterwards.
632 if (GRAPHICS_VER(i915) >= 11 ||
633 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
634 engine->uabi_capabilities |=
635 I915_VIDEO_CLASS_CAPABILITY_HEVC;
638 * SFC block is present only on even logical engine
641 if ((GRAPHICS_VER(i915) >= 11 &&
642 (engine->gt->info.vdbox_sfc_access &
643 BIT(engine->instance))) ||
644 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
645 engine->uabi_capabilities |=
646 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
647 } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
648 if (GRAPHICS_VER(i915) >= 9 &&
649 engine->gt->info.sfc_mask & BIT(engine->instance))
650 engine->uabi_capabilities |=
651 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
655 static void intel_setup_engine_capabilities(struct intel_gt *gt)
657 struct intel_engine_cs *engine;
658 enum intel_engine_id id;
660 for_each_engine(engine, gt, id)
661 __setup_engine_capabilities(engine);
665 * intel_engines_release() - free the resources allocated for Command Streamers
666 * @gt: pointer to struct intel_gt
668 void intel_engines_release(struct intel_gt *gt)
670 struct intel_engine_cs *engine;
671 enum intel_engine_id id;
674 * Before we release the resources held by engine, we must be certain
675 * that the HW is no longer accessing them -- having the GPU scribble
676 * to or read from a page being used for something else causes no end
679 * The GPU should be reset by this point, but assume the worst just
680 * in case we aborted before completely initialising the engines.
682 GEM_BUG_ON(intel_gt_pm_is_awake(gt));
683 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
684 __intel_gt_reset(gt, ALL_ENGINES);
686 /* Decouple the backend; but keep the layout for late GPU resets */
687 for_each_engine(engine, gt, id) {
688 if (!engine->release)
691 intel_wakeref_wait_for_idle(&engine->wakeref);
692 GEM_BUG_ON(intel_engine_pm_is_awake(engine));
694 engine->release(engine);
695 engine->release = NULL;
697 memset(&engine->reset, 0, sizeof(engine->reset));
701 void intel_engine_free_request_pool(struct intel_engine_cs *engine)
703 if (!engine->request_pool)
706 kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
709 void intel_engines_free(struct intel_gt *gt)
711 struct intel_engine_cs *engine;
712 enum intel_engine_id id;
714 /* Free the requests! dma-resv keeps fences around for an eternity */
717 for_each_engine(engine, gt, id) {
718 intel_engine_free_request_pool(engine);
720 gt->engine[id] = NULL;
725 bool gen11_vdbox_has_sfc(struct intel_gt *gt,
726 unsigned int physical_vdbox,
727 unsigned int logical_vdbox, u16 vdbox_mask)
729 struct drm_i915_private *i915 = gt->i915;
732 * In Gen11, only even numbered logical VDBOXes are hooked
733 * up to an SFC (Scaler & Format Converter) unit.
734 * In Gen12, even numbered physical instances are always connected
735 * to an SFC. Odd numbered physical instances have an SFC only if the
736 * previous even instance is fused off.
738 * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
739 * in the fuse register that tells us whether a specific SFC is present.
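* For example, on Gen12, if physical vdbox0 is fused off then vdbox1 picks up the SFC of the pair (BIT(0) is clear in vdbox_mask).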
741 if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
743 else if (MEDIA_VER(i915) >= 12)
744 return (physical_vdbox % 2 == 0) ||
745 !(BIT(physical_vdbox - 1) & vdbox_mask);
746 else if (MEDIA_VER(i915) == 11)
747 return logical_vdbox % 2 == 0;
752 static void engine_mask_apply_media_fuses(struct intel_gt *gt)
754 struct drm_i915_private *i915 = gt->i915;
755 unsigned int logical_vdbox = 0;
757 u32 media_fuse, fuse1;
761 if (MEDIA_VER(gt->i915) < 11)
765 * On newer platforms the fusing register is called 'enable' and has
766 * enable semantics, while on older platforms it is called 'disable'
767 * and bits have disable semantics.
769 media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
770 if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
771 media_fuse = ~media_fuse;
773 vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
774 vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
775 GEN11_GT_VEBOX_DISABLE_SHIFT;
777 if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
778 fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
779 gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
781 gt->info.sfc_mask = ~0;
784 for (i = 0; i < I915_MAX_VCS; i++) {
785 if (!HAS_ENGINE(gt, _VCS(i))) {
786 vdbox_mask &= ~BIT(i);
790 if (!(BIT(i) & vdbox_mask)) {
791 gt->info.engine_mask &= ~BIT(_VCS(i));
792 drm_dbg(&i915->drm, "vcs%u fused off\n", i);
796 if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
797 gt->info.vdbox_sfc_access |= BIT(i);
800 drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
801 vdbox_mask, VDBOX_MASK(gt));
802 GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
804 for (i = 0; i < I915_MAX_VECS; i++) {
805 if (!HAS_ENGINE(gt, _VECS(i))) {
806 vebox_mask &= ~BIT(i);
810 if (!(BIT(i) & vebox_mask)) {
811 gt->info.engine_mask &= ~BIT(_VECS(i));
812 drm_dbg(&i915->drm, "vecs%u fused off\n", i);
815 drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
816 vebox_mask, VEBOX_MASK(gt));
817 GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
820 static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
822 struct drm_i915_private *i915 = gt->i915;
823 struct intel_gt_info *info = &gt->info;
824 int ss_per_ccs = info->sseu.max_subslices / I915_MAX_CCS;
825 unsigned long ccs_mask;
828 if (GRAPHICS_VER(i915) < 11)
831 if (hweight32(CCS_MASK(gt)) <= 1)
834 ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
837 * If all DSS in a quadrant are fused off, the corresponding CCS
838 * engine is not available for use.
840 for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) {
841 info->engine_mask &= ~BIT(_CCS(i));
842 drm_dbg(&i915->drm, "ccs%u fused off\n", i);
846 static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
848 struct drm_i915_private *i915 = gt->i915;
849 struct intel_gt_info *info = &gt->info;
850 unsigned long meml3_mask;
853 if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
854 GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
857 meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
858 meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
861 * Link Copy engines may be fused off according to meml3_mask. Each
862 * bit is a quad that houses 2 Link Copy and 2 Sub Copy engines.
864 for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
865 unsigned int instance = quad * 2 + 1;
866 intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
869 if (mask & info->engine_mask) {
870 drm_dbg(&i915->drm, "bcs%u fused off\n", instance);
871 drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1);
873 info->engine_mask &= ~mask;
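/* e.g. if quad 1 is fused off, link copy engines bcs3 and bcs4 are removed from the engine mask. */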
879 * Determine which engines are fused off in our particular hardware.
880 * Note that we have a catch-22 situation where we need to be able to access
881 * the blitter forcewake domain to read the engine fuses, but at the same time
882 * we need to know which engines are available on the system to know which
883 * forcewake domains are present. We solve this by initializing the forcewake
884 * domains based on the full engine mask in the platform capabilities before
885 * calling this function and pruning the domains for fused-off engines
888 static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
890 struct intel_gt_info *info = &gt->info;
892 GEM_BUG_ON(!info->engine_mask);
894 engine_mask_apply_media_fuses(gt);
895 engine_mask_apply_compute_fuses(gt);
896 engine_mask_apply_copy_fuses(gt);
899 * The only use of the GSC CS is to load and communicate with the GSC
900 * FW, so we have no use for it if we don't have the FW.
902 * IMPORTANT: in cases where we don't have the GSC FW, we have a
903 * catch-22 situation that breaks media C6 due to 2 requirements:
904 * 1) once turned on, the GSC power well will not go to sleep unless the
906 * 2) to enable idling (which is required for media C6) we need to
907 * initialize the IDLE_MSG register for the GSC CS and do at least 1
908 * submission, which will wake up the GSC power well.
910 if (__HAS_ENGINE(info->engine_mask, GSC0) && !intel_uc_wants_gsc_uc(&gt->uc)) {
911 drm_notice(&gt->i915->drm,
912 "No GSC FW selected, disabling GSC CS and media C6\n");
913 info->engine_mask &= ~BIT(GSC0);
916 return info->engine_mask;
919 static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
920 u8 class, const u8 *map, u8 num_instances)
923 u8 current_logical_id = 0;
925 for (j = 0; j < num_instances; ++j) {
926 for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
927 if (!HAS_ENGINE(gt, i) ||
928 intel_engines[i].class != class)
931 if (intel_engines[i].instance == map[j]) {
932 logical_ids[intel_engines[i].instance] =
933 current_logical_id++;
940 static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
943 * Logical to physical mapping is needed for proper support
944 * of the split-frame feature.
946 if (MEDIA_VER(gt->i915) >= 11 && class == VIDEO_DECODE_CLASS) {
947 const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 };
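/* Present even physical instances get the low logical ids first, followed by the odd ones. */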
949 populate_logical_ids(gt, logical_ids, class,
950 map, ARRAY_SIZE(map));
953 u8 map[MAX_ENGINE_INSTANCE + 1];
955 for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
957 populate_logical_ids(gt, logical_ids, class,
958 map, ARRAY_SIZE(map));
963 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
964 * @gt: pointer to struct intel_gt
966 * Return: non-zero if the initialization failed.
968 int intel_engines_init_mmio(struct intel_gt *gt)
970 struct drm_i915_private *i915 = gt->i915;
971 const unsigned int engine_mask = init_engine_mask(gt);
972 unsigned int mask = 0;
973 unsigned int i, class;
974 u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
977 drm_WARN_ON(&i915->drm, engine_mask == 0);
978 drm_WARN_ON(&i915->drm, engine_mask &
979 GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
981 if (i915_inject_probe_failure(i915))
984 for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
985 setup_logical_ids(gt, logical_ids, class);
987 for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
988 u8 instance = intel_engines[i].instance;
990 if (intel_engines[i].class != class ||
994 err = intel_engine_setup(gt, i,
995 logical_ids[instance]);
1004 * Catch failures to update intel_engines table when the new engines
1005 * are added to the driver by a warning and disabling the forgotten
1008 if (drm_WARN_ON(&i915->drm, mask != engine_mask))
1009 gt->info.engine_mask = mask;
1011 gt->info.num_engines = hweight32(mask);
1013 intel_gt_check_and_clear_faults(gt);
1015 intel_setup_engine_capabilities(gt);
1017 intel_uncore_prune_engine_fw_domains(gt->uncore, gt);
1022 intel_engines_free(gt);
1026 void intel_engine_init_execlists(struct intel_engine_cs *engine)
1028 struct intel_engine_execlists * const execlists = &engine->execlists;
1030 execlists->port_mask = 1;
1031 GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
1032 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
1034 memset(execlists->pending, 0, sizeof(execlists->pending));
1036 memset(execlists->inflight, 0, sizeof(execlists->inflight));
1039 static void cleanup_status_page(struct intel_engine_cs *engine)
1041 struct i915_vma *vma;
1043 /* Prevent writes into HWSP after returning the page to the system */
1044 intel_engine_set_hwsp_writemask(engine, ~0u);
1046 vma = fetch_and_zero(&engine->status_page.vma);
1050 if (!HWS_NEEDS_PHYSICAL(engine->i915))
1051 i915_vma_unpin(vma);
1053 i915_gem_object_unpin_map(vma->obj);
1054 i915_gem_object_put(vma->obj);
1057 static int pin_ggtt_status_page(struct intel_engine_cs *engine,
1058 struct i915_gem_ww_ctx *ww,
1059 struct i915_vma *vma)
1063 if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
1065 * On g33, we cannot place HWS above 256MiB, so
1066 * restrict its pinning to the low mappable arena.
1067 * Though this restriction is not documented for
1068 * gen4, gen5, or byt, they also behave similarly
1069 * and hang if the HWS is placed at the top of the
1070 * GTT. To generalise, it appears that all !llc
1071 * platforms have issues with us placing the HWS
1072 * above the mappable region (even though we never
1075 flags = PIN_MAPPABLE;
1079 return i915_ggtt_pin(vma, ww, 0, flags);
1082 static int init_status_page(struct intel_engine_cs *engine)
1084 struct drm_i915_gem_object *obj;
1085 struct i915_gem_ww_ctx ww;
1086 struct i915_vma *vma;
1090 INIT_LIST_HEAD(&engine->status_page.timelines);
1093 * Though the HWS register does support 36bit addresses, historically
1094 * we have had hangs and corruption reported due to wild writes if
1095 * the HWS is placed above 4G. We only allow objects to be allocated
1096 * in GFP_DMA32 for i965, and no earlier physical address users had
1097 * access to more than 4G.
1099 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
1101 drm_err(&engine->i915->drm,
1102 "Failed to allocate status page\n");
1103 return PTR_ERR(obj);
1106 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
1108 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
1114 i915_gem_ww_ctx_init(&ww, true);
1116 ret = i915_gem_object_lock(obj, &ww);
1117 if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
1118 ret = pin_ggtt_status_page(engine, &ww, vma);
1122 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1123 if (IS_ERR(vaddr)) {
1124 ret = PTR_ERR(vaddr);
1128 engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
1129 engine->status_page.vma = vma;
1133 i915_vma_unpin(vma);
1135 if (ret == -EDEADLK) {
1136 ret = i915_gem_ww_ctx_backoff(&ww);
1140 i915_gem_ww_ctx_fini(&ww);
1143 i915_gem_object_put(obj);
1147 static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
1149 static const union intel_engine_tlb_inv_reg gen8_regs[] = {
1150 [RENDER_CLASS].reg = GEN8_RTCR,
1151 [VIDEO_DECODE_CLASS].reg = GEN8_M1TCR, /* , GEN8_M2TCR */
1152 [VIDEO_ENHANCEMENT_CLASS].reg = GEN8_VTCR,
1153 [COPY_ENGINE_CLASS].reg = GEN8_BTCR,
1155 static const union intel_engine_tlb_inv_reg gen12_regs[] = {
1156 [RENDER_CLASS].reg = GEN12_GFX_TLB_INV_CR,
1157 [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
1158 [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
1159 [COPY_ENGINE_CLASS].reg = GEN12_BLT_TLB_INV_CR,
1160 [COMPUTE_CLASS].reg = GEN12_COMPCTX_TLB_INV_CR,
1162 static const union intel_engine_tlb_inv_reg xehp_regs[] = {
1163 [RENDER_CLASS].mcr_reg = XEHP_GFX_TLB_INV_CR,
1164 [VIDEO_DECODE_CLASS].mcr_reg = XEHP_VD_TLB_INV_CR,
1165 [VIDEO_ENHANCEMENT_CLASS].mcr_reg = XEHP_VE_TLB_INV_CR,
1166 [COPY_ENGINE_CLASS].mcr_reg = XEHP_BLT_TLB_INV_CR,
1167 [COMPUTE_CLASS].mcr_reg = XEHP_COMPCTX_TLB_INV_CR,
1169 static const union intel_engine_tlb_inv_reg xelpmp_regs[] = {
1170 [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
1171 [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
1172 [OTHER_CLASS].reg = XELPMP_GSC_TLB_INV_CR,
1174 struct drm_i915_private *i915 = engine->i915;
1175 const unsigned int instance = engine->instance;
1176 const unsigned int class = engine->class;
1177 const union intel_engine_tlb_inv_reg *regs;
1178 union intel_engine_tlb_inv_reg reg;
1179 unsigned int num = 0;
1183 * New platforms should not be added with a catch-all-newer (>=)
1184 * condition so that any later platform added triggers the below warning
1185 * and in turn mandates a human cross-check of whether the invalidation
1186 * flows have compatible semantics.
1188 * For instance with the 11.00 -> 12.00 transition three out of five
1189 * respective engine registers were moved to masked type. Then after the
1190 * 12.00 -> 12.50 transition multicast handling is required too.
1193 if (engine->gt->type == GT_MEDIA) {
1194 if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
1196 num = ARRAY_SIZE(xelpmp_regs);
1199 if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
1200 GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
1201 GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
1202 GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
1204 num = ARRAY_SIZE(xehp_regs);
1205 } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
1206 GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
1208 num = ARRAY_SIZE(gen12_regs);
1209 } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
1211 num = ARRAY_SIZE(gen8_regs);
1212 } else if (GRAPHICS_VER(i915) < 8) {
1217 if (gt_WARN_ONCE(engine->gt, !num,
1218 "Platform does not implement TLB invalidation!"))
1221 if (gt_WARN_ON_ONCE(engine->gt,
1223 (!regs[class].reg.reg &&
1224 !regs[class].mcr_reg.reg)))
1229 if (regs == xelpmp_regs && class == OTHER_CLASS) {
1231 * There's only a single GSC instance, but it uses register bit
1232 * 1 instead of either 0 or OTHER_GSC_INSTANCE.
1234 GEM_WARN_ON(instance != OTHER_GSC_INSTANCE);
1236 } else if (regs == gen8_regs && class == VIDEO_DECODE_CLASS && instance == 1) {
1237 reg.reg = GEN8_M2TCR;
1245 engine->tlb_inv.mcr = regs == xehp_regs;
1246 engine->tlb_inv.reg = reg;
1247 engine->tlb_inv.done = val;
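/* On gen12+ the video, enhancement, compute and GSC invalidation registers are masked, hence the _MASKED_BIT_ENABLE below; the remaining cases take a plain write. */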
1249 if (GRAPHICS_VER(i915) >= 12 &&
1250 (engine->class == VIDEO_DECODE_CLASS ||
1251 engine->class == VIDEO_ENHANCEMENT_CLASS ||
1252 engine->class == COMPUTE_CLASS ||
1253 engine->class == OTHER_CLASS))
1254 engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
1256 engine->tlb_inv.request = val;
1261 static int engine_setup_common(struct intel_engine_cs *engine)
1265 init_llist_head(&engine->barrier_tasks);
1267 err = intel_engine_init_tlb_invalidation(engine);
1271 err = init_status_page(engine);
1275 engine->breadcrumbs = intel_breadcrumbs_create(engine);
1276 if (!engine->breadcrumbs) {
1281 engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
1282 if (!engine->sched_engine) {
1284 goto err_sched_engine;
1286 engine->sched_engine->private_data = engine;
1288 err = intel_engine_init_cmd_parser(engine);
1290 goto err_cmd_parser;
1292 intel_engine_init_execlists(engine);
1293 intel_engine_init__pm(engine);
1294 intel_engine_init_retire(engine);
1296 /* Use the whole device by default */
1298 intel_sseu_from_device_info(&engine->gt->info.sseu);
1300 intel_engine_init_workarounds(engine);
1301 intel_engine_init_whitelist(engine);
1302 intel_engine_init_ctx_wa(engine);
1304 if (GRAPHICS_VER(engine->i915) >= 12)
1305 engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
1310 i915_sched_engine_put(engine->sched_engine);
1312 intel_breadcrumbs_put(engine->breadcrumbs);
1314 cleanup_status_page(engine);
1318 struct measure_breadcrumb {
1319 struct i915_request rq;
1320 struct intel_ring ring;
1324 static int measure_breadcrumb_dw(struct intel_context *ce)
1326 struct intel_engine_cs *engine = ce->engine;
1327 struct measure_breadcrumb *frame;
1330 GEM_BUG_ON(!engine->gt->scratch);
1332 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1336 frame->rq.i915 = engine->i915;
1337 frame->rq.engine = engine;
1338 frame->rq.context = ce;
1339 rcu_assign_pointer(frame->rq.timeline, ce->timeline);
1340 frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
1342 frame->ring.vaddr = frame->cs;
1343 frame->ring.size = sizeof(frame->cs);
1345 BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
1346 frame->ring.effective_size = frame->ring.size;
1347 intel_ring_update_space(&frame->ring);
1348 frame->rq.ring = &frame->ring;
1350 mutex_lock(&ce->timeline->mutex);
1351 spin_lock_irq(&engine->sched_engine->lock);
1353 dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
1355 spin_unlock_irq(&engine->sched_engine->lock);
1356 mutex_unlock(&ce->timeline->mutex);
1358 GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
1364 struct intel_context *
1365 intel_engine_create_pinned_context(struct intel_engine_cs *engine,
1366 struct i915_address_space *vm,
1367 unsigned int ring_size,
1369 struct lock_class_key *key,
1372 struct intel_context *ce;
1375 ce = intel_context_create(engine);
1379 __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
1380 ce->timeline = page_pack_bits(NULL, hwsp);
1382 ce->ring_size = ring_size;
1384 i915_vm_put(ce->vm);
1385 ce->vm = i915_vm_get(vm);
1387 err = intel_context_pin(ce); /* perma-pin so it is always available */
1389 intel_context_put(ce);
1390 return ERR_PTR(err);
1393 list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);
1396 * Give our perma-pinned kernel timelines a separate lockdep class,
1397 * so that we can use them from within the normal user timelines
1398 * should we need to inject GPU operations during their request
1401 lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
1406 void intel_engine_destroy_pinned_context(struct intel_context *ce)
1408 struct intel_engine_cs *engine = ce->engine;
1409 struct i915_vma *hwsp = engine->status_page.vma;
1411 GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
1413 mutex_lock(&hwsp->vm->mutex);
1414 list_del(&ce->timeline->engine_link);
1415 mutex_unlock(&hwsp->vm->mutex);
1417 list_del(&ce->pinned_contexts_link);
1418 intel_context_unpin(ce);
1419 intel_context_put(ce);
1422 static struct intel_context *
1423 create_kernel_context(struct intel_engine_cs *engine)
1425 static struct lock_class_key kernel;
1427 return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
1428 I915_GEM_HWS_SEQNO_ADDR,
1429 &kernel, "kernel_context");
1433 * engine_init_common - initialize engine state which might require hw access
1434 * @engine: Engine to initialize.
1436 * Initializes @engine structure members shared between legacy and execlists
1437 * submission modes which do require hardware access.
1439 * Typically done at later stages of submission mode specific engine setup.
1441 * Returns zero on success or an error code on failure.
1443 static int engine_init_common(struct intel_engine_cs *engine)
1445 struct intel_context *ce;
1448 engine->set_default_submission(engine);
1451 * We may need to do things with the shrinker which
1452 * require us to immediately switch back to the default
1453 * context. This can cause a problem as pinning the
1454 * default context also requires GTT space which may not
1455 * be available. To avoid this we always pin the default
1458 ce = create_kernel_context(engine);
1462 ret = measure_breadcrumb_dw(ce);
1466 engine->emit_fini_breadcrumb_dw = ret;
1467 engine->kernel_context = ce;
1472 intel_engine_destroy_pinned_context(ce);
1476 int intel_engines_init(struct intel_gt *gt)
1478 int (*setup)(struct intel_engine_cs *engine);
1479 struct intel_engine_cs *engine;
1480 enum intel_engine_id id;
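/* Pick the submission backend in priority order: GuC, then execlists, then the legacy ring buffer. */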
1483 if (intel_uc_uses_guc_submission(&gt->uc)) {
1484 gt->submission_method = INTEL_SUBMISSION_GUC;
1485 setup = intel_guc_submission_setup;
1486 } else if (HAS_EXECLISTS(gt->i915)) {
1487 gt->submission_method = INTEL_SUBMISSION_ELSP;
1488 setup = intel_execlists_submission_setup;
1490 gt->submission_method = INTEL_SUBMISSION_RING;
1491 setup = intel_ring_submission_setup;
1494 for_each_engine(engine, gt, id) {
1495 err = engine_setup_common(engine);
1499 err = setup(engine);
1501 intel_engine_cleanup_common(engine);
1505 /* The backend should now be responsible for cleanup */
1506 GEM_BUG_ON(engine->release == NULL);
1508 err = engine_init_common(engine);
1512 intel_engine_add_user(engine);
1519 * intel_engine_cleanup_common - cleans up the engine state created by
1520 * the common initializers.
1521 * @engine: Engine to cleanup.
1523 * This cleans up everything created by the common helpers.
1525 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
1527 GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
1529 i915_sched_engine_put(engine->sched_engine);
1530 intel_breadcrumbs_put(engine->breadcrumbs);
1532 intel_engine_fini_retire(engine);
1533 intel_engine_cleanup_cmd_parser(engine);
1535 if (engine->default_state)
1536 fput(engine->default_state);
1538 if (engine->kernel_context)
1539 intel_engine_destroy_pinned_context(engine->kernel_context);
1541 GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
1542 cleanup_status_page(engine);
1544 intel_wa_list_free(&engine->ctx_wa_list);
1545 intel_wa_list_free(&engine->wa_list);
1546 intel_wa_list_free(&engine->whitelist);
1550 * intel_engine_resume - re-initializes the HW state of the engine
1551 * @engine: Engine to resume.
1553 * Returns zero on success or an error code on failure.
1555 int intel_engine_resume(struct intel_engine_cs *engine)
1557 intel_engine_apply_workarounds(engine);
1558 intel_engine_apply_whitelist(engine);
1560 return engine->resume(engine);
1563 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
1565 struct drm_i915_private *i915 = engine->i915;
1569 if (GRAPHICS_VER(i915) >= 8)
1570 acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
1571 else if (GRAPHICS_VER(i915) >= 4)
1572 acthd = ENGINE_READ(engine, RING_ACTHD);
1574 acthd = ENGINE_READ(engine, ACTHD);
1579 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
1583 if (GRAPHICS_VER(engine->i915) >= 8)
1584 bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
1586 bbaddr = ENGINE_READ(engine, RING_BBADDR);
1591 static unsigned long stop_timeout(const struct intel_engine_cs *engine)
1593 if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
1597 * If we are doing a normal GPU reset, we can take our time and allow
1598 * the engine to quiesce. We've stopped submission to the engine, and
1599 * if we wait long enough an innocent context should complete and
1600 * leave the engine idle. So they should not be caught unaware by
1601 * the forthcoming GPU reset (which usually follows the stop_cs)!
1603 return READ_ONCE(engine->props.stop_timeout_ms);
1606 static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
1607 int fast_timeout_us,
1608 int slow_timeout_ms)
1610 struct intel_uncore *uncore = engine->uncore;
1611 const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
1614 intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
1617 * Wa_22011802037: Prior to doing a reset, ensure CS is
1618 * stopped, set ring stop bit and prefetch disable bit to halt CS
1620 if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
1621 (GRAPHICS_VER(engine->i915) >= 11 &&
1622 GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
1623 intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
1624 _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
1626 err = __intel_wait_for_register_fw(engine->uncore, mode,
1627 MODE_IDLE, MODE_IDLE,
1632 /* A final mmio read to let GPU writes be hopefully flushed to memory */
1633 intel_uncore_posting_read_fw(uncore, mode);
1637 int intel_engine_stop_cs(struct intel_engine_cs *engine)
1641 if (GRAPHICS_VER(engine->i915) < 3)
1644 ENGINE_TRACE(engine, "\n");
1646 * TODO: Find out why occasionally stopping the CS times out. Seen
1647 * especially with gem_eio tests.
1649 * Occasionally trying to stop the cs times out, but does not adversely
1650 * affect functionality. The timeout is set as a config parameter that
1651 * defaults to 100ms. In most cases the follow up operation is to wait
1652 * for pending MI_FORCE_WAKES. The assumption is that this timeout is
1653 * sufficient for any pending MI_FORCEWAKEs to complete. Once root
1654 * caused, the caller must check and handle the return from this
1657 if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
1658 ENGINE_TRACE(engine,
1659 "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
1660 ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
1661 ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
1664 * Sometimes we observe that the idle flag is not
1665 * set even though the ring is empty. So double
1666 * check before giving up.
1668 if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
1669 (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
1676 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
1678 ENGINE_TRACE(engine, "\n");
1680 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
1683 static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
1685 static const i915_reg_t _reg[I915_NUM_ENGINES] = {
1686 [RCS0] = MSG_IDLE_CS,
1687 [BCS0] = MSG_IDLE_BCS,
1688 [VCS0] = MSG_IDLE_VCS0,
1689 [VCS1] = MSG_IDLE_VCS1,
1690 [VCS2] = MSG_IDLE_VCS2,
1691 [VCS3] = MSG_IDLE_VCS3,
1692 [VCS4] = MSG_IDLE_VCS4,
1693 [VCS5] = MSG_IDLE_VCS5,
1694 [VCS6] = MSG_IDLE_VCS6,
1695 [VCS7] = MSG_IDLE_VCS7,
1696 [VECS0] = MSG_IDLE_VECS0,
1697 [VECS1] = MSG_IDLE_VECS1,
1698 [VECS2] = MSG_IDLE_VECS2,
1699 [VECS3] = MSG_IDLE_VECS3,
1700 [CCS0] = MSG_IDLE_CS,
1701 [CCS1] = MSG_IDLE_CS,
1702 [CCS2] = MSG_IDLE_CS,
1703 [CCS3] = MSG_IDLE_CS,
1707 if (!_reg[engine->id].reg)
1710 val = intel_uncore_read(engine->uncore, _reg[engine->id]);
1712 /* Valid bits live in [29:25] and pending bits in [13:9]; shifting down by 16 aligns them so only valid, pending requests remain after the mask. */
1713 return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
1716 static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
1720 /* Ensure GPM receives fw up/down after CS is stopped */
1723 /* Wait for forcewake request to complete in GPM */
1724 ret = __intel_wait_for_register_fw(gt->uncore,
1725 GEN9_PWRGT_DOMAIN_STATUS,
1726 fw_mask, fw_mask, 5000, 0, NULL);
1728 /* Ensure CS receives fw ack from GPM */
1732 GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
1736 * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
1737 * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
1738 * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
1739 * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
1740 * are concerned only with the gt reset here, we use a logical OR of pending
1741 * forcewakeups from all reset domains and then wait for them to complete by
1742 * querying PWRGT_DOMAIN_STATUS.
1744 void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
1746 u32 fw_pending = __cs_pending_mi_force_wakes(engine);
1749 __gpm_wait_for_fw_complete(engine->gt, fw_pending);
1752 /* NB: please notice the memset */
1753 void intel_engine_get_instdone(const struct intel_engine_cs *engine,
1754 struct intel_instdone *instdone)
1756 struct drm_i915_private *i915 = engine->i915;
1757 struct intel_uncore *uncore = engine->uncore;
1758 u32 mmio_base = engine->mmio_base;
1763 memset(instdone, 0, sizeof(*instdone));
1765 if (GRAPHICS_VER(i915) >= 8) {
1766 instdone->instdone =
1767 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1769 if (engine->id != RCS0)
1772 instdone->slice_common =
1773 intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1774 if (GRAPHICS_VER(i915) >= 12) {
1775 instdone->slice_common_extra[0] =
1776 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
1777 instdone->slice_common_extra[1] =
1778 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
1781 for_each_ss_steering(iter, engine->gt, slice, subslice) {
1782 instdone->sampler[slice][subslice] =
1783 intel_gt_mcr_read(engine->gt,
1784 GEN8_SAMPLER_INSTDONE,
1786 instdone->row[slice][subslice] =
1787 intel_gt_mcr_read(engine->gt,
1792 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
1793 for_each_ss_steering(iter, engine->gt, slice, subslice)
1794 instdone->geom_svg[slice][subslice] =
1795 intel_gt_mcr_read(engine->gt,
1796 XEHPG_INSTDONE_GEOM_SVG,
1799 } else if (GRAPHICS_VER(i915) >= 7) {
1800 instdone->instdone =
1801 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1803 if (engine->id != RCS0)
1806 instdone->slice_common =
1807 intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1808 instdone->sampler[0][0] =
1809 intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
1810 instdone->row[0][0] =
1811 intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
1812 } else if (GRAPHICS_VER(i915) >= 4) {
1813 instdone->instdone =
1814 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1815 if (engine->id == RCS0)
1816 /* HACK: Using the wrong struct member */
1817 instdone->slice_common =
1818 intel_uncore_read(uncore, GEN4_INSTDONE1);
1820 instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
1824 static bool ring_is_idle(struct intel_engine_cs *engine)
1828 if (I915_SELFTEST_ONLY(!engine->mmio_base))
1831 if (!intel_engine_pm_get_if_awake(engine))
1834 /* First check that no commands are left in the ring */
1835 if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
1836 (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
1839 /* No bit for gen2, so assume the CS parser is idle */
1840 if (GRAPHICS_VER(engine->i915) > 2 &&
1841 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
1844 intel_engine_pm_put(engine);
1849 void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
1851 struct tasklet_struct *t = &engine->sched_engine->tasklet;
1857 if (tasklet_trylock(t)) {
1858 /* Must wait for any GPU reset in progress. */
1859 if (__tasklet_is_enabled(t))
1865 /* Synchronise and wait for the tasklet on another CPU */
1867 tasklet_unlock_wait(t);
1871 * intel_engine_is_idle() - Report if the engine has finished process all work
1872 * @engine: the intel_engine_cs
1874 * Return true if there are no requests pending, nothing left to be submitted
1875 * to hardware, and that the engine is idle.
1877 bool intel_engine_is_idle(struct intel_engine_cs *engine)
1879 /* More white lies, if wedged, hw state is inconsistent */
1880 if (intel_gt_is_wedged(engine->gt))
1883 if (!intel_engine_pm_is_awake(engine))
1886 /* Waiting to drain ELSP? */
1887 intel_synchronize_hardirq(engine->i915);
1888 intel_engine_flush_submission(engine);
1890 /* ELSP is empty, but there are ready requests? E.g. after reset */
1891 if (!i915_sched_engine_is_empty(engine->sched_engine))
1895 return ring_is_idle(engine);
1898 bool intel_engines_are_idle(struct intel_gt *gt)
1900 struct intel_engine_cs *engine;
1901 enum intel_engine_id id;
1904 * If the driver is wedged, HW state may be very inconsistent and
1905 * report that it is still busy, even though we have stopped using it.
1907 if (intel_gt_is_wedged(gt))
1910 /* Already parked (and passed an idleness test); must still be idle */
1911 if (!READ_ONCE(gt->awake))
1914 for_each_engine(engine, gt, id) {
1915 if (!intel_engine_is_idle(engine))
1922 bool intel_engine_irq_enable(struct intel_engine_cs *engine)
1924 if (!engine->irq_enable)
1927 /* Caller disables interrupts */
1928 spin_lock(engine->gt->irq_lock);
1929 engine->irq_enable(engine);
1930 spin_unlock(engine->gt->irq_lock);
1935 void intel_engine_irq_disable(struct intel_engine_cs *engine)
1937 if (!engine->irq_disable)
1940 /* Caller disables interrupts */
1941 spin_lock(engine->gt->irq_lock);
1942 engine->irq_disable(engine);
1943 spin_unlock(engine->gt->irq_lock);
1946 void intel_engines_reset_default_submission(struct intel_gt *gt)
1948 struct intel_engine_cs *engine;
1949 enum intel_engine_id id;
1951 for_each_engine(engine, gt, id) {
1952 if (engine->sanitize)
1953 engine->sanitize(engine);
1955 engine->set_default_submission(engine);
1959 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
1961 switch (GRAPHICS_VER(engine->i915)) {
1963 return false; /* uses physical not virtual addresses */
1965 /* maybe only uses physical not virtual addresses */
1966 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
1968 return !IS_I965G(engine->i915); /* who knows! */
1970 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
1976 static struct intel_timeline *get_timeline(struct i915_request *rq)
1978 struct intel_timeline *tl;
1981 * Even though we are holding the engine->sched_engine->lock here, there
1982 * is no control over the submission queue per-se and we are
1983 * inspecting the active state at a random point in time, with an
1984 * unknown queue. Play safe and make sure the timeline remains valid.
1985 * (Only being used for pretty printing, one extra kref shouldn't
1986 * cause a camel stampede!)
1989 tl = rcu_dereference(rq->timeline);
1990 if (!kref_get_unless_zero(&tl->kref))
1997 static int print_ring(char *buf, int sz, struct i915_request *rq)
2001 if (!i915_request_signaled(rq)) {
2002 struct intel_timeline *tl = get_timeline(rq);
2004 len = scnprintf(buf, sz,
2005 "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
2006 i915_ggtt_offset(rq->ring->vma),
2007 tl ? tl->hwsp_offset : 0,
2009 DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
2013 intel_timeline_put(tl);
2019 static void hexdump(struct drm_printer *m, const void *buf, size_t len)
2021 const size_t rowsize = 8 * sizeof(u32);
2022 const void *prev = NULL;
2026 for (pos = 0; pos < len; pos += rowsize) {
2029 if (prev && !memcmp(prev, buf + pos, rowsize)) {
2031 drm_printf(m, "*\n");
2037 WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
2038 rowsize, sizeof(u32),
2040 false) >= sizeof(line));
2041 drm_printf(m, "[%04zx] %s\n", pos, line);
2048 static const char *repr_timer(const struct timer_list *t)
2050 if (!READ_ONCE(t->expires))
2053 if (timer_pending(t))
2059 static void intel_engine_print_registers(struct intel_engine_cs *engine,
2060 struct drm_printer *m)
2062 struct drm_i915_private *i915 = engine->i915;
2063 struct intel_engine_execlists * const execlists = &engine->execlists;
2066 if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(i915, 4, 7))
2067 drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
2068 if (HAS_EXECLISTS(i915)) {
2069 drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
2070 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
2071 drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
2072 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
2074 drm_printf(m, "\tRING_START: 0x%08x\n",
2075 ENGINE_READ(engine, RING_START));
2076 drm_printf(m, "\tRING_HEAD: 0x%08x\n",
2077 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
2078 drm_printf(m, "\tRING_TAIL: 0x%08x\n",
2079 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
2080 drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
2081 ENGINE_READ(engine, RING_CTL),
2082 ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
2083 if (GRAPHICS_VER(engine->i915) > 2) {
2084 drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
2085 ENGINE_READ(engine, RING_MI_MODE),
2086 ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
2089 if (GRAPHICS_VER(i915) >= 6) {
2090 drm_printf(m, "\tRING_IMR: 0x%08x\n",
2091 ENGINE_READ(engine, RING_IMR));
2092 drm_printf(m, "\tRING_ESR: 0x%08x\n",
2093 ENGINE_READ(engine, RING_ESR));
2094 drm_printf(m, "\tRING_EMR: 0x%08x\n",
2095 ENGINE_READ(engine, RING_EMR));
2096 drm_printf(m, "\tRING_EIR: 0x%08x\n",
2097 ENGINE_READ(engine, RING_EIR));
2100 addr = intel_engine_get_active_head(engine);
2101 drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
2102 upper_32_bits(addr), lower_32_bits(addr));
2103 addr = intel_engine_get_last_batch_head(engine);
2104 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
2105 upper_32_bits(addr), lower_32_bits(addr));
2106 if (GRAPHICS_VER(i915) >= 8)
2107 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
2108 else if (GRAPHICS_VER(i915) >= 4)
2109 addr = ENGINE_READ(engine, RING_DMA_FADD);
2111 addr = ENGINE_READ(engine, DMA_FADD_I8XX);
2112 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
2113 upper_32_bits(addr), lower_32_bits(addr));
2114 if (GRAPHICS_VER(i915) >= 4) {
2115 drm_printf(m, "\tIPEIR: 0x%08x\n",
2116 ENGINE_READ(engine, RING_IPEIR));
2117 drm_printf(m, "\tIPEHR: 0x%08x\n",
2118 ENGINE_READ(engine, RING_IPEHR));
2120 drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
2121 drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
2124 if (HAS_EXECLISTS(i915) && !intel_engine_uses_guc(engine)) {
2125 struct i915_request * const *port, *rq;
2127 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
2128 const u8 num_entries = execlists->csb_size;
2132 drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
2133 str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)),
2134 str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)),
2135 repr_timer(&engine->execlists.preempt),
2136 repr_timer(&engine->execlists.timer));
2138 read = execlists->csb_head;
2139 write = READ_ONCE(*execlists->csb_write);
2141 drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
2142 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
2143 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
2144 read, write, num_entries);
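/* Normalise the CSB read/write indices and unwrap write past read so the loop below walks every entry produced since the last read. */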
2146 if (read >= num_entries)
2148 if (write >= num_entries)
2151 write += num_entries;
2152 while (read < write) {
2153 idx = ++read % num_entries;
2154 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
2155 idx, hws[idx * 2], hws[idx * 2 + 1]);
2158 i915_sched_engine_active_lock_bh(engine->sched_engine);
2160 for (port = execlists->active; (rq = *port); port++) {
2164 len = scnprintf(hdr, sizeof(hdr),
2165 "\t\tActive[%d]: ccid:%08x%s%s, ",
2166 (int)(port - execlists->active),
2167 rq->context->lrc.ccid,
2168 intel_context_is_closed(rq->context) ? "!" : "",
2169 intel_context_is_banned(rq->context) ? "*" : "");
2170 len += print_ring(hdr + len, sizeof(hdr) - len, rq);
2171 scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
2172 i915_request_show(m, rq, hdr, 0);
2174 for (port = execlists->pending; (rq = *port); port++) {
2178 len = scnprintf(hdr, sizeof(hdr),
2179 "\t\tPending[%d]: ccid:%08x%s%s, ",
2180 (int)(port - execlists->pending),
2181 rq->context->lrc.ccid,
2182 intel_context_is_closed(rq->context) ? "!" : "",
2183 intel_context_is_banned(rq->context) ? "*" : "");
2184 len += print_ring(hdr + len, sizeof(hdr) - len, rq);
2185 scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
2186 i915_request_show(m, rq, hdr, 0);
2189 i915_sched_engine_active_unlock_bh(engine->sched_engine);
2190 } else if (GRAPHICS_VER(i915) > 6) {
2191 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
2192 ENGINE_READ(engine, RING_PP_DIR_BASE));
2193 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
2194 ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
2195 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
2196 ENGINE_READ(engine, RING_PP_DIR_DCLV));
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
	struct i915_vma_resource *vma_res = rq->batch_res;
	void *ring;
	int size;

	drm_printf(m,
		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
		   rq->head, rq->postfix, rq->tail,
		   vma_res ? upper_32_bits(vma_res->start) : ~0u,
		   vma_res ? lower_32_bits(vma_res->start) : ~0u);

	size = rq->tail - rq->head;
	if (rq->tail < rq->head)
		size += rq->ring->size;

	ring = kmalloc(size, GFP_ATOMIC);
	if (ring) {
		const void *vaddr = rq->ring->vaddr;
		unsigned int head = rq->head;
		unsigned int len = 0;

		/* The request may wrap around the end of the ring buffer */
		if (rq->tail < head) {
			len = rq->ring->size - head;
			memcpy(ring, vaddr + head, len);
			head = 0;
		}
		memcpy(ring + len, vaddr + head, size - len);

		hexdump(m, ring, size);
		kfree(ring);
	}
}

static unsigned long read_ul(void *p, size_t x)
{
	return *(unsigned long *)(p + x);
}

static void print_properties(struct intel_engine_cs *engine,
			     struct drm_printer *m)
{
	static const struct pmap {
		size_t offset;
		const char *name;
	} props[] = {
#define P(x) { \
		.offset = offsetof(typeof(engine->props), x), \
		.name = #x \
	}
		P(heartbeat_interval_ms),
		P(max_busywait_duration_ns),
		P(preempt_timeout_ms),
		P(stop_timeout_ms),
		P(timeslice_duration_ms),
#undef P
		{},
	};
	const struct pmap *p;

	drm_printf(m, "\tProperties:\n");
	for (p = props; p->name; p++)
		drm_printf(m, "\t\t%s: %lu [default %lu]\n",
			   p->name,
			   read_ul(&engine->props, p->offset),
			   read_ul(&engine->defaults, p->offset));
}
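
/*
 * Example of the output produced by print_properties() above; the values
 * shown are illustrative only and depend on Kconfig and sysfs overrides:
 *
 *	Properties:
 *		heartbeat_interval_ms: 2500 [default 2500]
 *		preempt_timeout_ms: 640 [default 640]
 */
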
static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
{
	struct intel_timeline *tl = get_timeline(rq);

	i915_request_show(m, rq, msg, 0);

	drm_printf(m, "\t\tring->start: 0x%08x\n",
		   i915_ggtt_offset(rq->ring->vma));
	drm_printf(m, "\t\tring->head: 0x%08x\n",
		   rq->ring->head);
	drm_printf(m, "\t\tring->tail: 0x%08x\n",
		   rq->ring->tail);
	drm_printf(m, "\t\tring->emit: 0x%08x\n",
		   rq->ring->emit);
	drm_printf(m, "\t\tring->space: 0x%08x\n",
		   rq->ring->space);

	if (tl) {
		drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
			   tl->hwsp_offset);
		intel_timeline_put(tl);
	}

	print_request_ring(m, rq);

	if (rq->context->lrc_reg_state) {
		drm_printf(m, "Logical Ring Context:\n");
		hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
	}
}

void intel_engine_dump_active_requests(struct list_head *requests,
				       struct i915_request *hung_rq,
				       struct drm_printer *m)
{
	struct i915_request *rq;
	const char *msg;
	enum i915_request_state state;

	list_for_each_entry(rq, requests, sched.link) {
		if (rq == hung_rq)
			continue;

		state = i915_test_request_state(rq);
		if (state < I915_REQUEST_QUEUED)
			continue;

		if (state == I915_REQUEST_ACTIVE)
			msg = "\t\tactive on engine";
		else
			msg = "\t\tactive in queue";

		engine_dump_request(rq, m, msg);
	}
}

static void engine_dump_active_requests(struct intel_engine_cs *engine,
					struct drm_printer *m)
{
	struct intel_context *hung_ce = NULL;
	struct i915_request *hung_rq = NULL;

	/*
	 * No need for an engine->irq_seqno_barrier() before the seqno reads.
	 * The GPU is still running so requests are still executing and any
	 * hardware reads will be out of date by the time they are reported.
	 * But the intention here is just to report an instantaneous snapshot
	 * so that's fine.
	 */
	intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);

	drm_printf(m, "\tRequests:\n");

	if (hung_rq)
		engine_dump_request(hung_rq, m, "\t\thung");
	else if (hung_ce)
		drm_printf(m, "\t\tGot hung ce but no hung rq!\n");

	if (intel_uc_uses_guc_submission(&engine->gt->uc))
		intel_guc_dump_active_requests(engine, hung_rq, m);
	else
		intel_execlists_dump_active_requests(engine, hung_rq, m);

	if (hung_rq)
		i915_request_put(hung_rq);
}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	ktime_t dummy;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (intel_gt_is_wedged(engine->gt))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
	drm_printf(m, "\tBarriers?: %s\n",
		   str_yes_no(!llist_empty(&engine->barrier_tasks)));
	drm_printf(m, "\tLatency: %luus\n",
		   ewma__engine_latency_read(&engine->latency));
	if (intel_engine_supports_stats(engine))
		drm_printf(m, "\tRuntime: %llums\n",
			   ktime_to_ms(intel_engine_get_busy_time(engine,
								  &dummy)));
	drm_printf(m, "\tForcewake: %x domains, %d active\n",
		   engine->fw_domain, READ_ONCE(engine->fw_active));

	rcu_read_lock();
	rq = READ_ONCE(engine->heartbeat.systole);
	if (rq)
		drm_printf(m, "\tHeartbeat: %d ms ago\n",
			   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
	rcu_read_unlock();
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));
	print_properties(engine, m);

	engine_dump_active_requests(engine, m);

	drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
	if (wakeref) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	intel_execlists_show_requests(engine, m, i915_request_show, 8);

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", str_yes_no(intel_engine_is_idle(engine)));

	intel_engine_print_breadcrumbs(engine, m);
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 * @now: monotonic timestamp of sampling
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{
	return engine->busyness(engine, now);
}
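
/*
 * Illustrative sketch (not part of the driver, not compiled): a caller can
 * turn two samples of the accumulated busyness into a utilisation
 * percentage by dividing the busy delta by the elapsed monotonic time
 * between the samples. The helper name below is hypothetical and only
 * demonstrates the intended use of intel_engine_get_busy_time().
 */
#if 0
static u64 example_engine_utilisation_pct(struct intel_engine_cs *engine,
					  ktime_t prev_busy, ktime_t prev_now)
{
	ktime_t now;
	ktime_t busy = intel_engine_get_busy_time(engine, &now);

	/* Both counters increase monotonically, so the deltas are >= 0 */
	return div64_u64(100 * ktime_to_ns(ktime_sub(busy, prev_busy)),
			 max_t(u64, ktime_to_ns(ktime_sub(now, prev_now)), 1));
}
#endif
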
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
			    unsigned int count, unsigned long flags)
{
	if (!count)
		return ERR_PTR(-EINVAL);

	if (count == 1 && !(flags & FORCE_VIRTUAL))
		return intel_context_create(siblings[0]);

	GEM_BUG_ON(!siblings[0]->cops->create_virtual);
	return siblings[0]->cops->create_virtual(siblings, count, flags);
}
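
/*
 * Illustrative sketch (not part of the driver, not compiled): building a
 * virtual context that load-balances across every physical video-decode
 * engine of a GT. Error handling is elided and the helper name is
 * hypothetical; note that a single sibling simply falls back to a plain
 * per-engine context, and zero siblings returns -EINVAL.
 */
#if 0
static struct intel_context *example_virtual_vcs(struct intel_gt *gt)
{
	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
	unsigned int count = 0, i;

	for (i = 0; i <= MAX_ENGINE_INSTANCE; i++) {
		if (gt->engine_class[VIDEO_DECODE_CLASS][i])
			siblings[count++] = gt->engine_class[VIDEO_DECODE_CLASS][i];
	}

	return intel_engine_create_virtual(siblings, count, 0);
}
#endif
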
static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{
	struct i915_request *request, *active = NULL;

	/*
	 * This search does not work in GuC submission mode. However, the GuC
	 * will report the hanging context directly to the driver itself. So
	 * the driver should never get here when in GuC mode.
	 */
	GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));

	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that neither is
	 * crucially ordered with an interrupt. After a hang, the GPU is dead
	 * and we assume that no more writes can happen (we waited long enough
	 * for all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
	 */
	lockdep_assert_held(&engine->sched_engine->lock);

	rcu_read_lock();
	request = execlists_active(&engine->execlists);
	if (request) {
		struct intel_timeline *tl = request->context->timeline;

		list_for_each_entry_from_reverse(request, &tl->requests, link) {
			if (__i915_request_is_complete(request))
				break;

			active = request;
		}
	}
	rcu_read_unlock();
	if (active)
		return active;

	list_for_each_entry(request, &engine->sched_engine->requests,
			    sched.link) {
		if (i915_test_request_state(request) != I915_REQUEST_ACTIVE)
			continue;

		active = request;
		break;
	}

	return active;
}

void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
				  struct intel_context **ce, struct i915_request **rq)
{
	unsigned long flags;

	*ce = intel_engine_get_hung_context(engine);
	if (*ce) {
		intel_engine_clear_hung_context(engine);

		*rq = intel_context_get_active_request(*ce);
		return;
	}

	/*
	 * Getting here with GuC enabled means it is a forced error capture
	 * with no actual hang. So, no need to attempt the execlist search.
	 */
	if (intel_uc_uses_guc_submission(&engine->gt->uc))
		return;

	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	*rq = engine_execlist_find_hung_request(engine);
	if (*rq)
		*rq = i915_request_get_rcu(*rq);
	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
{
	/*
	 * If there are any non-fused-off CCS engines, we need to enable CCS
	 * support in the RCU_MODE register. This only needs to be done once,
	 * so for simplicity we'll take care of this in the RCS engine's
	 * resume handler; since the RCS and all CCS engines belong to the
	 * same reset domain and are reset together, this will also take care
	 * of re-applying the setting after i915-triggered resets.
	 */
	if (!CCS_MASK(engine->gt))
		return;

	intel_uncore_write(engine->uncore, GEN12_RCU_MODE,
			   _MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif