2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include <drm/drm_print.h>
27 #include "gem/i915_gem_context.h"
31 #include "intel_breadcrumbs.h"
32 #include "intel_context.h"
33 #include "intel_engine.h"
34 #include "intel_engine_pm.h"
35 #include "intel_engine_user.h"
36 #include "intel_execlists_submission.h"
38 #include "intel_gt_requests.h"
39 #include "intel_gt_pm.h"
40 #include "intel_lrc_reg.h"
41 #include "intel_reset.h"
42 #include "intel_ring.h"
43 #include "uc/intel_guc_submission.h"
45 /* Haswell does have the CXT_SIZE register however it does not appear to be
46 * valid. Now, docs explain in dwords what is in the context object. The full
47 * size is 70720 bytes, however, the power context and execlist context will
48 * never be saved (power context is stored elsewhere, and execlists don't work
49 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
52 #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
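/*
 * Sanity check of the figure above: 66944 / 4096 = 16.34, so the context
 * image needs DIV_ROUND_UP(66944, PAGE_SIZE) == 17 pages with 4 KiB pages.
 */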
54 #define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
55 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
56 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
57 #define GEN10_LR_CONTEXT_RENDER_SIZE (18 * PAGE_SIZE)
58 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
60 #define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
62 #define MAX_MMIO_BASES 3
67 /* mmio bases table *must* be sorted in reverse gen order */
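/*
 * Lookup example: __engine_mmio_base() below scans a table top-down and takes
 * the first entry whose .gen is not above the device's gen, so on a gen9 part
 * the VCS0 table { gen 11, gen 6, gen 4 } resolves to GEN6_BSD_RING_BASE.
 */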
68 struct engine_mmio_base {
71 } mmio_bases[MAX_MMIO_BASES];
74 static const struct engine_info intel_engines[] = {
77 .class = RENDER_CLASS,
80 { .gen = 1, .base = RENDER_RING_BASE }
85 .class = COPY_ENGINE_CLASS,
88 { .gen = 6, .base = BLT_RING_BASE }
93 .class = VIDEO_DECODE_CLASS,
96 { .gen = 11, .base = GEN11_BSD_RING_BASE },
97 { .gen = 6, .base = GEN6_BSD_RING_BASE },
98 { .gen = 4, .base = BSD_RING_BASE }
103 .class = VIDEO_DECODE_CLASS,
106 { .gen = 11, .base = GEN11_BSD2_RING_BASE },
107 { .gen = 8, .base = GEN8_BSD2_RING_BASE }
112 .class = VIDEO_DECODE_CLASS,
115 { .gen = 11, .base = GEN11_BSD3_RING_BASE }
120 .class = VIDEO_DECODE_CLASS,
123 { .gen = 11, .base = GEN11_BSD4_RING_BASE }
128 .class = VIDEO_ENHANCEMENT_CLASS,
131 { .gen = 11, .base = GEN11_VEBOX_RING_BASE },
132 { .gen = 7, .base = VEBOX_RING_BASE }
137 .class = VIDEO_ENHANCEMENT_CLASS,
140 { .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
/**
 * intel_engine_context_size() - return the size of the context for an engine
 * @gt: the gt
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
159 u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
161 struct intel_uncore *uncore = gt->uncore;
164 BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
168 switch (INTEL_GEN(gt->i915)) {
170 MISSING_CASE(INTEL_GEN(gt->i915));
171 return DEFAULT_LR_CONTEXT_RENDER_SIZE;
174 return GEN11_LR_CONTEXT_RENDER_SIZE;
176 return GEN10_LR_CONTEXT_RENDER_SIZE;
178 return GEN9_LR_CONTEXT_RENDER_SIZE;
180 return GEN8_LR_CONTEXT_RENDER_SIZE;
182 if (IS_HASWELL(gt->i915))
183 return HSW_CXT_TOTAL_SIZE;
185 cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
186 return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
189 cxt_size = intel_uncore_read(uncore, CXT_SIZE);
190 return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
		/*
		 * There is a discrepancy here between the size reported
		 * by the register and the size of the context layout
		 * in the docs. Both are described as authoritative!
		 *
		 * The discrepancy is on the order of a few cachelines,
		 * but the total is under one page (4k), which is our
		 * minimum allocation anyway so it should all come
		 * out in the wash.
		 */
204 cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
		drm_dbg(&gt->i915->drm,
206 "gen%d CXT_SIZE = %d bytes [0x%08x]\n",
207 INTEL_GEN(gt->i915), cxt_size * 64,
209 return round_up(cxt_size * 64, PAGE_SIZE);
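		/*
		 * Note for the register-based cases above: the value read
		 * from CXT_SIZE is a count of 64-byte units (hence the "* 64"
		 * conversion to bytes, as also shown by the drm_dbg above)
		 * before rounding up to whole pages.
		 */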
212 /* For the special day when i810 gets merged. */
220 case VIDEO_DECODE_CLASS:
221 case VIDEO_ENHANCEMENT_CLASS:
222 case COPY_ENGINE_CLASS:
223 if (INTEL_GEN(gt->i915) < 8)
225 return GEN8_LR_CONTEXT_OTHER_SIZE;
229 static u32 __engine_mmio_base(struct drm_i915_private *i915,
230 const struct engine_mmio_base *bases)
234 for (i = 0; i < MAX_MMIO_BASES; i++)
235 if (INTEL_GEN(i915) >= bases[i].gen)
238 GEM_BUG_ON(i == MAX_MMIO_BASES);
239 GEM_BUG_ON(!bases[i].base);
241 return bases[i].base;
244 static void __sprint_engine_name(struct intel_engine_cs *engine)
247 * Before we know what the uABI name for this engine will be,
248 * we still would like to keep track of this engine in the debug logs.
249 * We throw in a ' here as a reminder that this isn't its final name.
251 GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
252 intel_engine_class_repr(engine->class),
253 engine->instance) >= sizeof(engine->name));
256 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
259 * Though they added more rings on g4x/ilk, they did not add
260 * per-engine HWSTAM until gen6.
262 if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
265 if (INTEL_GEN(engine->i915) >= 3)
266 ENGINE_WRITE(engine, RING_HWSTAM, mask);
268 ENGINE_WRITE16(engine, RING_HWSTAM, mask);
271 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
273 /* Mask off all writes into the unknown HWSP */
274 intel_engine_set_hwsp_writemask(engine, ~0u);
277 static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
279 const struct engine_info *info = &intel_engines[id];
280 struct drm_i915_private *i915 = gt->i915;
281 struct intel_engine_cs *engine;
283 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
284 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
286 if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
289 if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
292 if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
295 if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
298 engine = kzalloc(sizeof(*engine), GFP_KERNEL);
302 BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
305 engine->legacy_idx = INVALID_ENGINE;
306 engine->mask = BIT(id);
309 engine->uncore = gt->uncore;
310 engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
311 engine->hw_id = info->hw_id;
312 engine->guc_id = MAKE_GUC_ID(info->class, info->instance);
314 engine->class = info->class;
315 engine->instance = info->instance;
316 __sprint_engine_name(engine);
318 engine->props.heartbeat_interval_ms =
319 CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
320 engine->props.max_busywait_duration_ns =
321 CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
322 engine->props.preempt_timeout_ms =
323 CONFIG_DRM_I915_PREEMPT_TIMEOUT;
324 engine->props.stop_timeout_ms =
325 CONFIG_DRM_I915_STOP_TIMEOUT;
326 engine->props.timeslice_duration_ms =
327 CONFIG_DRM_I915_TIMESLICE_DURATION;
329 /* Override to uninterruptible for OpenCL workloads. */
330 if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
331 engine->props.preempt_timeout_ms = 0;
333 engine->defaults = engine->props; /* never to change again */
335 engine->context_size = intel_engine_context_size(gt, engine->class);
336 if (WARN_ON(engine->context_size > BIT(20)))
337 engine->context_size = 0;
338 if (engine->context_size)
339 DRIVER_CAPS(i915)->has_logical_contexts = true;
341 /* Nothing to do here, execute in order of dependencies */
342 engine->schedule = NULL;
344 ewma__engine_latency_init(&engine->latency);
345 seqcount_init(&engine->stats.lock);
347 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
349 /* Scrub mmio state on takeover */
350 intel_engine_sanitize_mmio(engine);
352 gt->engine_class[info->class][info->instance] = engine;
353 gt->engine[id] = engine;
358 static void __setup_engine_capabilities(struct intel_engine_cs *engine)
360 struct drm_i915_private *i915 = engine->i915;
362 if (engine->class == VIDEO_DECODE_CLASS) {
364 * HEVC support is present on first engine instance
365 * before Gen11 and on all instances afterwards.
367 if (INTEL_GEN(i915) >= 11 ||
368 (INTEL_GEN(i915) >= 9 && engine->instance == 0))
369 engine->uabi_capabilities |=
370 I915_VIDEO_CLASS_CAPABILITY_HEVC;
		 * SFC block is present only on even logical engine
		 * instances.
		 */
376 if ((INTEL_GEN(i915) >= 11 &&
377 (engine->gt->info.vdbox_sfc_access &
378 BIT(engine->instance))) ||
379 (INTEL_GEN(i915) >= 9 && engine->instance == 0))
380 engine->uabi_capabilities |=
381 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
382 } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
383 if (INTEL_GEN(i915) >= 9)
384 engine->uabi_capabilities |=
385 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
389 static void intel_setup_engine_capabilities(struct intel_gt *gt)
391 struct intel_engine_cs *engine;
392 enum intel_engine_id id;
394 for_each_engine(engine, gt, id)
395 __setup_engine_capabilities(engine);
399 * intel_engines_release() - free the resources allocated for Command Streamers
400 * @gt: pointer to struct intel_gt
402 void intel_engines_release(struct intel_gt *gt)
404 struct intel_engine_cs *engine;
405 enum intel_engine_id id;
408 * Before we release the resources held by engine, we must be certain
409 * that the HW is no longer accessing them -- having the GPU scribble
	 * to or read from a page being used for something else causes no end
	 * of fun.
	 *
	 * The GPU should be reset by this point, but assume the worst just
	 * in case we aborted before completely initialising the engines.
	 */
416 GEM_BUG_ON(intel_gt_pm_is_awake(gt));
417 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
418 __intel_gt_reset(gt, ALL_ENGINES);
420 /* Decouple the backend; but keep the layout for late GPU resets */
421 for_each_engine(engine, gt, id) {
422 if (!engine->release)
425 intel_wakeref_wait_for_idle(&engine->wakeref);
426 GEM_BUG_ON(intel_engine_pm_is_awake(engine));
428 engine->release(engine);
429 engine->release = NULL;
431 memset(&engine->reset, 0, sizeof(engine->reset));
435 void intel_engine_free_request_pool(struct intel_engine_cs *engine)
437 if (!engine->request_pool)
440 kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
443 void intel_engines_free(struct intel_gt *gt)
445 struct intel_engine_cs *engine;
446 enum intel_engine_id id;
448 /* Free the requests! dma-resv keeps fences around for an eternity */
451 for_each_engine(engine, gt, id) {
452 intel_engine_free_request_pool(engine);
454 gt->engine[id] = NULL;
/*
 * Determine which engines are fused off in our particular hardware.
 * Note that we have a catch-22 situation where we need to be able to access
 * the blitter forcewake domain to read the engine fuses, but at the same time
 * we need to know which engines are available on the system to know which
 * forcewake domains are present. We solve this by initializing the forcewake
 * domains based on the full engine mask in the platform capabilities before
 * calling this function and pruning the domains for fused-off engines
 * afterwards.
 */
468 static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
470 struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
472 struct intel_uncore *uncore = gt->uncore;
473 unsigned int logical_vdbox = 0;
479 info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
481 if (INTEL_GEN(i915) < 11)
482 return info->engine_mask;
484 media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
486 vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
487 vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
488 GEN11_GT_VEBOX_DISABLE_SHIFT;
490 for (i = 0; i < I915_MAX_VCS; i++) {
491 if (!HAS_ENGINE(gt, _VCS(i))) {
492 vdbox_mask &= ~BIT(i);
496 if (!(BIT(i) & vdbox_mask)) {
497 info->engine_mask &= ~BIT(_VCS(i));
498 drm_dbg(&i915->drm, "vcs%u fused off\n", i);
503 * In Gen11, only even numbered logical VDBOXes are
504 * hooked up to an SFC (Scaler & Format Converter) unit.
505 * In TGL each VDBOX has access to an SFC.
507 if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
508 gt->info.vdbox_sfc_access |= BIT(i);
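		/*
		 * Worked example: on gen11 with VCS1 fused off, the remaining
		 * VCS0 and VCS2 become logical vdboxes 0 and 1, so only VCS0
		 * (logical 0, i.e. even) is recorded here as having SFC
		 * access.
		 */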
510 drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
511 vdbox_mask, VDBOX_MASK(gt));
512 GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
514 for (i = 0; i < I915_MAX_VECS; i++) {
515 if (!HAS_ENGINE(gt, _VECS(i))) {
516 vebox_mask &= ~BIT(i);
520 if (!(BIT(i) & vebox_mask)) {
521 info->engine_mask &= ~BIT(_VECS(i));
522 drm_dbg(&i915->drm, "vecs%u fused off\n", i);
525 drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
526 vebox_mask, VEBOX_MASK(gt));
527 GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
529 return info->engine_mask;
533 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
534 * @gt: pointer to struct intel_gt
536 * Return: non-zero if the initialization failed.
538 int intel_engines_init_mmio(struct intel_gt *gt)
540 struct drm_i915_private *i915 = gt->i915;
541 const unsigned int engine_mask = init_engine_mask(gt);
542 unsigned int mask = 0;
546 drm_WARN_ON(&i915->drm, engine_mask == 0);
547 drm_WARN_ON(&i915->drm, engine_mask &
548 GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
550 if (i915_inject_probe_failure(i915))
553 for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
554 if (!HAS_ENGINE(gt, i))
557 err = intel_engine_setup(gt, i);
565 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
569 if (drm_WARN_ON(&i915->drm, mask != engine_mask))
570 gt->info.engine_mask = mask;
572 gt->info.num_engines = hweight32(mask);
574 intel_gt_check_and_clear_faults(gt);
576 intel_setup_engine_capabilities(gt);
578 intel_uncore_prune_engine_fw_domains(gt->uncore, gt);
583 intel_engines_free(gt);
587 void intel_engine_init_execlists(struct intel_engine_cs *engine)
589 struct intel_engine_execlists * const execlists = &engine->execlists;
591 execlists->port_mask = 1;
592 GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
593 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
595 memset(execlists->pending, 0, sizeof(execlists->pending));
597 memset(execlists->inflight, 0, sizeof(execlists->inflight));
599 execlists->queue_priority_hint = INT_MIN;
600 execlists->queue = RB_ROOT_CACHED;
603 static void cleanup_status_page(struct intel_engine_cs *engine)
605 struct i915_vma *vma;
607 /* Prevent writes into HWSP after returning the page to the system */
608 intel_engine_set_hwsp_writemask(engine, ~0u);
610 vma = fetch_and_zero(&engine->status_page.vma);
614 if (!HWS_NEEDS_PHYSICAL(engine->i915))
617 i915_gem_object_unpin_map(vma->obj);
618 i915_gem_object_put(vma->obj);
621 static int pin_ggtt_status_page(struct intel_engine_cs *engine,
622 struct i915_vma *vma)
626 if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
628 * On g33, we cannot place HWS above 256MiB, so
629 * restrict its pinning to the low mappable arena.
630 * Though this restriction is not documented for
631 * gen4, gen5, or byt, they also behave similarly
632 * and hang if the HWS is placed at the top of the
633 * GTT. To generalise, it appears that all !llc
634 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually use it).
		 */
638 flags = PIN_MAPPABLE;
642 return i915_ggtt_pin(vma, NULL, 0, flags);
645 static int init_status_page(struct intel_engine_cs *engine)
647 struct drm_i915_gem_object *obj;
648 struct i915_vma *vma;
652 INIT_LIST_HEAD(&engine->status_page.timelines);
655 * Though the HWS register does support 36bit addresses, historically
656 * we have had hangs and corruption reported due to wild writes if
657 * the HWS is placed above 4G. We only allow objects to be allocated
658 * in GFP_DMA32 for i965, and no earlier physical address users had
659 * access to more than 4G.
661 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
663 drm_err(&engine->i915->drm,
664 "Failed to allocate status page\n");
668 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
670 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
676 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
678 ret = PTR_ERR(vaddr);
682 engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
683 engine->status_page.vma = vma;
685 if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
686 ret = pin_ggtt_status_page(engine, vma);
694 i915_gem_object_unpin_map(obj);
696 i915_gem_object_put(obj);
700 static int engine_setup_common(struct intel_engine_cs *engine)
704 init_llist_head(&engine->barrier_tasks);
706 err = init_status_page(engine);
710 engine->breadcrumbs = intel_breadcrumbs_create(engine);
711 if (!engine->breadcrumbs) {
716 intel_engine_init_active(engine, ENGINE_PHYSICAL);
717 intel_engine_init_execlists(engine);
718 intel_engine_init_cmd_parser(engine);
719 intel_engine_init__pm(engine);
720 intel_engine_init_retire(engine);
722 /* Use the whole device by default */
	engine->sseu =
		intel_sseu_from_device_info(&engine->gt->info.sseu);
726 intel_engine_init_workarounds(engine);
727 intel_engine_init_whitelist(engine);
728 intel_engine_init_ctx_wa(engine);
730 if (INTEL_GEN(engine->i915) >= 12)
731 engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
736 cleanup_status_page(engine);
740 struct measure_breadcrumb {
741 struct i915_request rq;
742 struct intel_ring ring;
746 static int measure_breadcrumb_dw(struct intel_context *ce)
748 struct intel_engine_cs *engine = ce->engine;
749 struct measure_breadcrumb *frame;
752 GEM_BUG_ON(!engine->gt->scratch);
754 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
758 frame->rq.engine = engine;
759 frame->rq.context = ce;
760 rcu_assign_pointer(frame->rq.timeline, ce->timeline);
762 frame->ring.vaddr = frame->cs;
763 frame->ring.size = sizeof(frame->cs);
765 BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
766 frame->ring.effective_size = frame->ring.size;
767 intel_ring_update_space(&frame->ring);
768 frame->rq.ring = &frame->ring;
770 mutex_lock(&ce->timeline->mutex);
771 spin_lock_irq(&engine->active.lock);
773 dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
775 spin_unlock_irq(&engine->active.lock);
776 mutex_unlock(&ce->timeline->mutex);
778 GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
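	/*
	 * The dword count measured here is cached by engine_init_common() as
	 * engine->emit_fini_breadcrumb_dw, so request emission knows how much
	 * ring space to set aside for the closing breadcrumb.
	 */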
785 intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
787 INIT_LIST_HEAD(&engine->active.requests);
788 INIT_LIST_HEAD(&engine->active.hold);
790 spin_lock_init(&engine->active.lock);
791 lockdep_set_subclass(&engine->active.lock, subclass);
794 * Due to an interesting quirk in lockdep's internal debug tracking,
795 * after setting a subclass we must ensure the lock is used. Otherwise,
796 * nr_unused_locks is incremented once too often.
798 #ifdef CONFIG_DEBUG_LOCK_ALLOC
800 lock_map_acquire(&engine->active.lock.dep_map);
801 lock_map_release(&engine->active.lock.dep_map);
806 static struct intel_context *
807 create_pinned_context(struct intel_engine_cs *engine,
809 struct lock_class_key *key,
812 struct intel_context *ce;
815 ce = intel_context_create(engine);
819 __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
820 ce->timeline = page_pack_bits(NULL, hwsp);
822 err = intel_context_pin(ce); /* perma-pin so it is always available */
824 intel_context_put(ce);
829 * Give our perma-pinned kernel timelines a separate lockdep class,
830 * so that we can use them from within the normal user timelines
831 * should we need to inject GPU operations during their request
834 lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
839 static void destroy_pinned_context(struct intel_context *ce)
841 struct intel_engine_cs *engine = ce->engine;
842 struct i915_vma *hwsp = engine->status_page.vma;
844 GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
846 mutex_lock(&hwsp->vm->mutex);
847 list_del(&ce->timeline->engine_link);
848 mutex_unlock(&hwsp->vm->mutex);
850 intel_context_unpin(ce);
851 intel_context_put(ce);
854 static struct intel_context *
855 create_kernel_context(struct intel_engine_cs *engine)
857 static struct lock_class_key kernel;
859 return create_pinned_context(engine, I915_GEM_HWS_SEQNO_ADDR,
860 &kernel, "kernel_context");
/**
 * engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
874 static int engine_init_common(struct intel_engine_cs *engine)
876 struct intel_context *ce;
879 engine->set_default_submission(engine);
882 * We may need to do things with the shrinker which
883 * require us to immediately switch back to the default
884 * context. This can cause a problem as pinning the
885 * default context also requires GTT space which may not
886 * be available. To avoid this we always pin the default
889 ce = create_kernel_context(engine);
893 ret = measure_breadcrumb_dw(ce);
897 engine->emit_fini_breadcrumb_dw = ret;
898 engine->kernel_context = ce;
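	/*
	 * Note: the perma-pinned kernel context created above stays with the
	 * engine for its whole lifetime and is only released again via
	 * destroy_pinned_context() from intel_engine_cleanup_common().
	 */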
903 intel_context_put(ce);
907 int intel_engines_init(struct intel_gt *gt)
909 int (*setup)(struct intel_engine_cs *engine);
910 struct intel_engine_cs *engine;
911 enum intel_engine_id id;
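	/*
	 * Pick the submission backend in priority order: GuC submission when
	 * in use, then execlists where the hardware supports them, otherwise
	 * the legacy ring buffer backend.
	 */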
	if (intel_uc_uses_guc_submission(&gt->uc))
915 setup = intel_guc_submission_setup;
916 else if (HAS_EXECLISTS(gt->i915))
917 setup = intel_execlists_submission_setup;
919 setup = intel_ring_submission_setup;
921 for_each_engine(engine, gt, id) {
922 err = engine_setup_common(engine);
930 err = engine_init_common(engine);
934 intel_engine_add_user(engine);
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
947 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
949 GEM_BUG_ON(!list_empty(&engine->active.requests));
950 tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
952 intel_breadcrumbs_free(engine->breadcrumbs);
954 intel_engine_fini_retire(engine);
955 intel_engine_cleanup_cmd_parser(engine);
957 if (engine->default_state)
958 fput(engine->default_state);
960 if (engine->kernel_context)
961 destroy_pinned_context(engine->kernel_context);
963 GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
964 cleanup_status_page(engine);
966 intel_wa_list_free(&engine->ctx_wa_list);
967 intel_wa_list_free(&engine->wa_list);
968 intel_wa_list_free(&engine->whitelist);
972 * intel_engine_resume - re-initializes the HW state of the engine
973 * @engine: Engine to resume.
975 * Returns zero on success or an error code on failure.
977 int intel_engine_resume(struct intel_engine_cs *engine)
979 intel_engine_apply_workarounds(engine);
980 intel_engine_apply_whitelist(engine);
982 return engine->resume(engine);
985 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
987 struct drm_i915_private *i915 = engine->i915;
991 if (INTEL_GEN(i915) >= 8)
992 acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
993 else if (INTEL_GEN(i915) >= 4)
994 acthd = ENGINE_READ(engine, RING_ACTHD);
996 acthd = ENGINE_READ(engine, ACTHD);
1001 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
1005 if (INTEL_GEN(engine->i915) >= 8)
1006 bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
1008 bbaddr = ENGINE_READ(engine, RING_BBADDR);
1013 static unsigned long stop_timeout(const struct intel_engine_cs *engine)
1015 if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
1019 * If we are doing a normal GPU reset, we can take our time and allow
1020 * the engine to quiesce. We've stopped submission to the engine, and
1021 * if we wait long enough an innocent context should complete and
1022 * leave the engine idle. So they should not be caught unaware by
1023 * the forthcoming GPU reset (which usually follows the stop_cs)!
1025 return READ_ONCE(engine->props.stop_timeout_ms);
1028 static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
1029 int fast_timeout_us,
1030 int slow_timeout_ms)
1032 struct intel_uncore *uncore = engine->uncore;
1033 const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
1036 intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
1037 err = __intel_wait_for_register_fw(engine->uncore, mode,
1038 MODE_IDLE, MODE_IDLE,
1043 /* A final mmio read to let GPU writes be hopefully flushed to memory */
1044 intel_uncore_posting_read_fw(uncore, mode);
1048 int intel_engine_stop_cs(struct intel_engine_cs *engine)
1052 if (INTEL_GEN(engine->i915) < 3)
1055 ENGINE_TRACE(engine, "\n");
1056 if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
1057 ENGINE_TRACE(engine,
1058 "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
1059 ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
1060 ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
1063 * Sometimes we observe that the idle flag is not
1064 * set even though the ring is empty. So double
1065 * check before giving up.
1067 if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
1068 (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
1075 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
1077 ENGINE_TRACE(engine, "\n");
1079 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
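	/*
	 * STOP_RING is cleared with a masked write, so only that bit changes
	 * and the rest of RING_MI_MODE is left untouched.
	 */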
1082 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
1085 case I915_CACHE_NONE: return " uncached";
1086 case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
1087 case I915_CACHE_L3_LLC: return " L3+LLC";
1088 case I915_CACHE_WT: return " WT";
1094 read_subslice_reg(const struct intel_engine_cs *engine,
1095 int slice, int subslice, i915_reg_t reg)
1097 struct drm_i915_private *i915 = engine->i915;
1098 struct intel_uncore *uncore = engine->uncore;
1099 u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
1100 enum forcewake_domains fw_domains;
1102 if (INTEL_GEN(i915) >= 11) {
1103 mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
1104 mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
1106 mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
1107 mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
1110 fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
1112 fw_domains |= intel_uncore_forcewake_for_reg(uncore,
1114 FW_REG_READ | FW_REG_WRITE);
1116 spin_lock_irq(&uncore->lock);
1117 intel_uncore_forcewake_get__locked(uncore, fw_domains);
1119 old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
1123 intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
1125 val = intel_uncore_read_fw(uncore, reg);
1128 mcr |= old_mcr & mcr_mask;
1130 intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
1132 intel_uncore_forcewake_put__locked(uncore, fw_domains);
1133 spin_unlock_irq(&uncore->lock);
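	/*
	 * The sequence above steers the MCR selector at the requested
	 * slice/subslice for the duration of the read, then restores the
	 * previous steering (the mcr_mask bits saved in old_mcr) before
	 * dropping forcewake and the uncore lock.
	 */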
1138 /* NB: please notice the memset */
1139 void intel_engine_get_instdone(const struct intel_engine_cs *engine,
1140 struct intel_instdone *instdone)
1142 struct drm_i915_private *i915 = engine->i915;
1143 const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
1144 struct intel_uncore *uncore = engine->uncore;
1145 u32 mmio_base = engine->mmio_base;
1149 memset(instdone, 0, sizeof(*instdone));
1151 switch (INTEL_GEN(i915)) {
1153 instdone->instdone =
1154 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1156 if (engine->id != RCS0)
1159 instdone->slice_common =
1160 intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1161 if (INTEL_GEN(i915) >= 12) {
1162 instdone->slice_common_extra[0] =
1163 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
1164 instdone->slice_common_extra[1] =
1165 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
1167 for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
1168 instdone->sampler[slice][subslice] =
1169 read_subslice_reg(engine, slice, subslice,
1170 GEN7_SAMPLER_INSTDONE);
1171 instdone->row[slice][subslice] =
1172 read_subslice_reg(engine, slice, subslice,
1177 instdone->instdone =
1178 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1180 if (engine->id != RCS0)
1183 instdone->slice_common =
1184 intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1185 instdone->sampler[0][0] =
1186 intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
1187 instdone->row[0][0] =
1188 intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
1194 instdone->instdone =
1195 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1196 if (engine->id == RCS0)
1197 /* HACK: Using the wrong struct member */
1198 instdone->slice_common =
1199 intel_uncore_read(uncore, GEN4_INSTDONE1);
1203 instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
1208 static bool ring_is_idle(struct intel_engine_cs *engine)
1212 if (I915_SELFTEST_ONLY(!engine->mmio_base))
1215 if (!intel_engine_pm_get_if_awake(engine))
1218 /* First check that no commands are left in the ring */
1219 if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
1220 (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
1223 /* No bit for gen2, so assume the CS parser is idle */
1224 if (INTEL_GEN(engine->i915) > 2 &&
1225 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
1228 intel_engine_pm_put(engine);
1233 void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
1235 struct tasklet_struct *t = &engine->execlists.tasklet;
1241 if (tasklet_trylock(t)) {
1242 /* Must wait for any GPU reset in progress. */
1243 if (__tasklet_is_enabled(t))
1249 /* Synchronise and wait for the tasklet on another CPU */
1251 tasklet_unlock_wait(t);
 * intel_engine_is_idle() - Report if the engine has finished processing all work
1256 * @engine: the intel_engine_cs
1258 * Return true if there are no requests pending, nothing left to be submitted
1259 * to hardware, and that the engine is idle.
1261 bool intel_engine_is_idle(struct intel_engine_cs *engine)
1263 /* More white lies, if wedged, hw state is inconsistent */
1264 if (intel_gt_is_wedged(engine->gt))
1267 if (!intel_engine_pm_is_awake(engine))
1270 /* Waiting to drain ELSP? */
1271 if (execlists_active(&engine->execlists)) {
1272 synchronize_hardirq(to_pci_dev(engine->i915->drm.dev)->irq);
1274 intel_engine_flush_submission(engine);
1276 if (execlists_active(&engine->execlists))
1280 /* ELSP is empty, but there are ready requests? E.g. after reset */
1281 if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
1285 return ring_is_idle(engine);
1288 bool intel_engines_are_idle(struct intel_gt *gt)
1290 struct intel_engine_cs *engine;
1291 enum intel_engine_id id;
1294 * If the driver is wedged, HW state may be very inconsistent and
1295 * report that it is still busy, even though we have stopped using it.
1297 if (intel_gt_is_wedged(gt))
1300 /* Already parked (and passed an idleness test); must still be idle */
1301 if (!READ_ONCE(gt->awake))
1304 for_each_engine(engine, gt, id) {
1305 if (!intel_engine_is_idle(engine))
1312 void intel_engines_reset_default_submission(struct intel_gt *gt)
1314 struct intel_engine_cs *engine;
1315 enum intel_engine_id id;
1317 for_each_engine(engine, gt, id) {
1318 if (engine->sanitize)
1319 engine->sanitize(engine);
1321 engine->set_default_submission(engine);
1325 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
1327 switch (INTEL_GEN(engine->i915)) {
1329 return false; /* uses physical not virtual addresses */
1331 /* maybe only uses physical not virtual addresses */
1332 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
1334 return !IS_I965G(engine->i915); /* who knows! */
1336 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
1342 static struct intel_timeline *get_timeline(struct i915_request *rq)
1344 struct intel_timeline *tl;
1347 * Even though we are holding the engine->active.lock here, there
1348 * is no control over the submission queue per-se and we are
1349 * inspecting the active state at a random point in time, with an
1350 * unknown queue. Play safe and make sure the timeline remains valid.
1351 * (Only being used for pretty printing, one extra kref shouldn't
1352 * cause a camel stampede!)
1355 tl = rcu_dereference(rq->timeline);
1356 if (!kref_get_unless_zero(&tl->kref))
1363 static int print_ring(char *buf, int sz, struct i915_request *rq)
1367 if (!i915_request_signaled(rq)) {
1368 struct intel_timeline *tl = get_timeline(rq);
1370 len = scnprintf(buf, sz,
1371 "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
1372 i915_ggtt_offset(rq->ring->vma),
1373 tl ? tl->hwsp_offset : 0,
1375 DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
1379 intel_timeline_put(tl);
1385 static void hexdump(struct drm_printer *m, const void *buf, size_t len)
1387 const size_t rowsize = 8 * sizeof(u32);
1388 const void *prev = NULL;
1392 for (pos = 0; pos < len; pos += rowsize) {
1395 if (prev && !memcmp(prev, buf + pos, rowsize)) {
1397 drm_printf(m, "*\n");
1403 WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
1404 rowsize, sizeof(u32),
1406 false) >= sizeof(line));
1407 drm_printf(m, "[%04zx] %s\n", pos, line);
1414 static const char *repr_timer(const struct timer_list *t)
1416 if (!READ_ONCE(t->expires))
1419 if (timer_pending(t))
1425 static void intel_engine_print_registers(struct intel_engine_cs *engine,
1426 struct drm_printer *m)
1428 struct drm_i915_private *dev_priv = engine->i915;
1429 struct intel_engine_execlists * const execlists = &engine->execlists;
1432 if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
1433 drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
1434 if (HAS_EXECLISTS(dev_priv)) {
1435 drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
1436 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
1437 drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
1438 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
1440 drm_printf(m, "\tRING_START: 0x%08x\n",
1441 ENGINE_READ(engine, RING_START));
1442 drm_printf(m, "\tRING_HEAD: 0x%08x\n",
1443 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
1444 drm_printf(m, "\tRING_TAIL: 0x%08x\n",
1445 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
1446 drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
1447 ENGINE_READ(engine, RING_CTL),
1448 ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
1449 if (INTEL_GEN(engine->i915) > 2) {
1450 drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
1451 ENGINE_READ(engine, RING_MI_MODE),
1452 ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
1455 if (INTEL_GEN(dev_priv) >= 6) {
1456 drm_printf(m, "\tRING_IMR: 0x%08x\n",
1457 ENGINE_READ(engine, RING_IMR));
1458 drm_printf(m, "\tRING_ESR: 0x%08x\n",
1459 ENGINE_READ(engine, RING_ESR));
1460 drm_printf(m, "\tRING_EMR: 0x%08x\n",
1461 ENGINE_READ(engine, RING_EMR));
1462 drm_printf(m, "\tRING_EIR: 0x%08x\n",
1463 ENGINE_READ(engine, RING_EIR));
1466 addr = intel_engine_get_active_head(engine);
1467 drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
1468 upper_32_bits(addr), lower_32_bits(addr));
1469 addr = intel_engine_get_last_batch_head(engine);
1470 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
1471 upper_32_bits(addr), lower_32_bits(addr));
1472 if (INTEL_GEN(dev_priv) >= 8)
1473 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
1474 else if (INTEL_GEN(dev_priv) >= 4)
1475 addr = ENGINE_READ(engine, RING_DMA_FADD);
1477 addr = ENGINE_READ(engine, DMA_FADD_I8XX);
1478 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
1479 upper_32_bits(addr), lower_32_bits(addr));
1480 if (INTEL_GEN(dev_priv) >= 4) {
1481 drm_printf(m, "\tIPEIR: 0x%08x\n",
1482 ENGINE_READ(engine, RING_IPEIR));
1483 drm_printf(m, "\tIPEHR: 0x%08x\n",
1484 ENGINE_READ(engine, RING_IPEHR));
1486 drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
1487 drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
1490 if (intel_engine_in_guc_submission_mode(engine)) {
1491 /* nothing to print yet */
1492 } else if (HAS_EXECLISTS(dev_priv)) {
1493 struct i915_request * const *port, *rq;
1495 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
1496 const u8 num_entries = execlists->csb_size;
1500 drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
1501 yesno(test_bit(TASKLET_STATE_SCHED,
1502 &engine->execlists.tasklet.state)),
1503 enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
1504 repr_timer(&engine->execlists.preempt),
1505 repr_timer(&engine->execlists.timer));
1507 read = execlists->csb_head;
1508 write = READ_ONCE(*execlists->csb_write);
1510 drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
1511 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
1512 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
1513 read, write, num_entries);
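		/*
		 * The CSB read/write pointers are indices into a circular
		 * buffer of num_entries slots; normalise them below and, when
		 * the writer has wrapped past the reader, add num_entries to
		 * write so the dump walks the entries from read to write in
		 * order.
		 */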
1515 if (read >= num_entries)
1517 if (write >= num_entries)
1520 write += num_entries;
1521 while (read < write) {
1522 idx = ++read % num_entries;
1523 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
1524 idx, hws[idx * 2], hws[idx * 2 + 1]);
1527 execlists_active_lock_bh(execlists);
1529 for (port = execlists->active; (rq = *port); port++) {
1533 len = scnprintf(hdr, sizeof(hdr),
1534 "\t\tActive[%d]: ccid:%08x%s%s, ",
1535 (int)(port - execlists->active),
1536 rq->context->lrc.ccid,
1537 intel_context_is_closed(rq->context) ? "!" : "",
1538 intel_context_is_banned(rq->context) ? "*" : "");
1539 len += print_ring(hdr + len, sizeof(hdr) - len, rq);
1540 scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1541 i915_request_show(m, rq, hdr, 0);
1543 for (port = execlists->pending; (rq = *port); port++) {
1547 len = scnprintf(hdr, sizeof(hdr),
1548 "\t\tPending[%d]: ccid:%08x%s%s, ",
1549 (int)(port - execlists->pending),
1550 rq->context->lrc.ccid,
1551 intel_context_is_closed(rq->context) ? "!" : "",
1552 intel_context_is_banned(rq->context) ? "*" : "");
1553 len += print_ring(hdr + len, sizeof(hdr) - len, rq);
1554 scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1555 i915_request_show(m, rq, hdr, 0);
1558 execlists_active_unlock_bh(execlists);
1559 } else if (INTEL_GEN(dev_priv) > 6) {
1560 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
1561 ENGINE_READ(engine, RING_PP_DIR_BASE));
1562 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
1563 ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
1564 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
1565 ENGINE_READ(engine, RING_PP_DIR_DCLV));
1569 static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
1575 "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
1576 rq->head, rq->postfix, rq->tail,
1577 rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
1578 rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
1580 size = rq->tail - rq->head;
1581 if (rq->tail < rq->head)
1582 size += rq->ring->size;
1584 ring = kmalloc(size, GFP_ATOMIC);
1586 const void *vaddr = rq->ring->vaddr;
1587 unsigned int head = rq->head;
1588 unsigned int len = 0;
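		/*
		 * The request may wrap around the end of the ring, so copy it
		 * out in up to two chunks before hexdumping.
		 */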
1590 if (rq->tail < head) {
1591 len = rq->ring->size - head;
1592 memcpy(ring, vaddr + head, len);
1595 memcpy(ring + len, vaddr + head, size - len);
1597 hexdump(m, ring, size);
1602 static unsigned long list_count(struct list_head *list)
1604 struct list_head *pos;
1605 unsigned long count = 0;
1607 list_for_each(pos, list)
1613 static unsigned long read_ul(void *p, size_t x)
1615 return *(unsigned long *)(p + x);
1618 static void print_properties(struct intel_engine_cs *engine,
1619 struct drm_printer *m)
1621 static const struct pmap {
1626 .offset = offsetof(typeof(engine->props), x), \
1629 P(heartbeat_interval_ms),
1630 P(max_busywait_duration_ns),
1631 P(preempt_timeout_ms),
1633 P(timeslice_duration_ms),
1638 const struct pmap *p;
1640 drm_printf(m, "\tProperties:\n");
1641 for (p = props; p->name; p++)
1642 drm_printf(m, "\t\t%s: %lu [default %lu]\n",
1644 read_ul(&engine->props, p->offset),
1645 read_ul(&engine->defaults, p->offset));
1648 void intel_engine_dump(struct intel_engine_cs *engine,
1649 struct drm_printer *m,
1650 const char *header, ...)
1652 struct i915_gpu_error * const error = &engine->i915->gpu_error;
1653 struct i915_request *rq;
1654 intel_wakeref_t wakeref;
1655 unsigned long flags;
1661 va_start(ap, header);
1662 drm_vprintf(m, header, &ap);
1666 if (intel_gt_is_wedged(engine->gt))
1667 drm_printf(m, "*** WEDGED ***\n");
1669 drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
1670 drm_printf(m, "\tBarriers?: %s\n",
1671 yesno(!llist_empty(&engine->barrier_tasks)));
1672 drm_printf(m, "\tLatency: %luus\n",
1673 ewma__engine_latency_read(&engine->latency));
1674 if (intel_engine_supports_stats(engine))
1675 drm_printf(m, "\tRuntime: %llums\n",
1676 ktime_to_ms(intel_engine_get_busy_time(engine,
1678 drm_printf(m, "\tForcewake: %x domains, %d active\n",
1679 engine->fw_domain, READ_ONCE(engine->fw_active));
1682 rq = READ_ONCE(engine->heartbeat.systole);
1684 drm_printf(m, "\tHeartbeat: %d ms ago\n",
1685 jiffies_to_msecs(jiffies - rq->emitted_jiffies));
1687 drm_printf(m, "\tReset count: %d (global %d)\n",
1688 i915_reset_engine_count(error, engine),
1689 i915_reset_count(error));
1690 print_properties(engine, m);
1692 drm_printf(m, "\tRequests:\n");
1694 spin_lock_irqsave(&engine->active.lock, flags);
1695 rq = intel_engine_find_active_request(engine);
1697 struct intel_timeline *tl = get_timeline(rq);
1699 i915_request_show(m, rq, "\t\tactive ", 0);
1701 drm_printf(m, "\t\tring->start: 0x%08x\n",
1702 i915_ggtt_offset(rq->ring->vma));
1703 drm_printf(m, "\t\tring->head: 0x%08x\n",
1705 drm_printf(m, "\t\tring->tail: 0x%08x\n",
1707 drm_printf(m, "\t\tring->emit: 0x%08x\n",
1709 drm_printf(m, "\t\tring->space: 0x%08x\n",
1713 drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
1715 intel_timeline_put(tl);
1718 print_request_ring(m, rq);
1720 if (rq->context->lrc_reg_state) {
1721 drm_printf(m, "Logical Ring Context:\n");
1722 hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
1725 drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
1726 spin_unlock_irqrestore(&engine->active.lock, flags);
1728 drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
1729 wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
1731 intel_engine_print_registers(engine, m);
1732 intel_runtime_pm_put(engine->uncore->rpm, wakeref);
1734 drm_printf(m, "\tDevice is asleep; skipping register dump\n");
1737 intel_execlists_show_requests(engine, m, i915_request_show, 8);
1739 drm_printf(m, "HWSP:\n");
1740 hexdump(m, engine->status_page.addr, PAGE_SIZE);
1742 drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
1744 intel_engine_print_breadcrumbs(engine, m);
1747 static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
1750 ktime_t total = engine->stats.total;
1753 * If the engine is executing something at the moment
1754 * add it to the total.
1757 if (READ_ONCE(engine->stats.active))
1758 total = ktime_add(total, ktime_sub(*now, engine->stats.start));
1764 * intel_engine_get_busy_time() - Return current accumulated engine busyness
1765 * @engine: engine to report on
1766 * @now: monotonic timestamp of sampling
1768 * Returns accumulated time @engine was busy since engine stats were enabled.
1770 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
1776 seq = read_seqcount_begin(&engine->stats.lock);
1777 total = __intel_engine_get_busy_time(engine, now);
1778 } while (read_seqcount_retry(&engine->stats.lock, seq));
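	/*
	 * The seqcount retry loop above guarantees a consistent snapshot of
	 * the busyness counters even if they are being updated concurrently.
	 */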
1783 static bool match_ring(struct i915_request *rq)
1785 u32 ring = ENGINE_READ(rq->engine, RING_START);
1787 return ring == i915_ggtt_offset(rq->ring->vma);
1790 struct i915_request *
1791 intel_engine_find_active_request(struct intel_engine_cs *engine)
1793 struct i915_request *request, *active = NULL;
1796 * We are called by the error capture, reset and to dump engine
1797 * state at random points in time. In particular, note that neither is
1798 * crucially ordered with an interrupt. After a hang, the GPU is dead
1799 * and we assume that no more writes can happen (we waited long enough
1800 * for all writes that were in transaction to be flushed) - adding an
1801 * extra delay for a recent interrupt is pointless. Hence, we do
1802 * not need an engine->irq_seqno_barrier() before the seqno reads.
1803 * At all other times, we must assume the GPU is still running, but
1804 * we only care about the snapshot of this moment.
1806 lockdep_assert_held(&engine->active.lock);
1809 request = execlists_active(&engine->execlists);
1811 struct intel_timeline *tl = request->context->timeline;
1813 list_for_each_entry_from_reverse(request, &tl->requests, link) {
1814 if (__i915_request_is_complete(request))
1824 list_for_each_entry(request, &engine->active.requests, sched.link) {
1825 if (__i915_request_is_complete(request))
1828 if (!__i915_request_has_started(request))
1831 /* More than one preemptible request may match! */
1832 if (!match_ring(request))
1842 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1843 #include "mock_engine.c"
1844 #include "selftest_engine.c"
1845 #include "selftest_engine_cs.c"