drm/i915: Rename execlists->queue_priority to queue_priority_hint
[linux-2.6-block.git] drivers/gpu/drm/i915/intel_engine_cs.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_reset.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);

	u8 uabi_class;
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

#define MAX_MMIO_BASES 3
struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS4] = {
		.hw_id = VCS4_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS2] = {
		.hw_id = VECS2_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

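/*
 * Worked example (editor's illustration, assuming the usual 4KiB PAGE_SIZE):
 * the Haswell figure quoted at the top of the file behaves like the
 * register-based gen6/gen7 paths above - 66944 bytes of context state
 * rounds up to DIV_ROUND_UP(66944, 4096) = 17 pages, which is exactly what
 * HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) hard-codes; on other gen6/gen7 parts
 * the size is instead read from CXT_SIZE/GEN7_CXT_SIZE, scaled to bytes and
 * rounded up to a page boundary in the same way.
 */
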
static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

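/*
 * Illustrative example (editor's note, not part of the original file):
 * because each mmio_bases[] table is sorted by .gen in descending order,
 * the loop above picks the first entry whose .gen is not newer than the
 * device. For the VCS table ({11, GEN11_BSD_RING_BASE},
 * {6, GEN6_BSD_RING_BASE}, {4, BSD_RING_BASE}), a gen9 device skips the
 * gen11 entry and resolves to GEN6_BSD_RING_BASE, while a gen4 device
 * falls through to BSD_RING_BASE.
 */
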
74419daa
DCS
258static void __sprint_engine_name(char *name, const struct engine_info *info)
259{
260 WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
261 intel_engine_classes[info->class].name,
262 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
263}
264
060f2322
CW
265void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
266{
267 struct drm_i915_private *dev_priv = engine->i915;
268 i915_reg_t hwstam;
269
270 /*
271 * Though they added more rings on g4x/ilk, they did not add
272 * per-engine HWSTAM until gen6.
273 */
274 if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
275 return;
276
277 hwstam = RING_HWSTAM(engine->mmio_base);
278 if (INTEL_GEN(dev_priv) >= 3)
279 I915_WRITE(hwstam, mask);
280 else
281 I915_WRITE16(hwstam, mask);
282}
283
284static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
285{
286 /* Mask off all writes into the unknown HWSP */
287 intel_engine_set_hwsp_writemask(engine, ~0u);
288}
289
3b3f1650 290static int
88d2ba2e
TU
291intel_engine_setup(struct drm_i915_private *dev_priv,
292 enum intel_engine_id id)
293{
294 const struct engine_info *info = &intel_engines[id];
3b3f1650
AG
295 struct intel_engine_cs *engine;
296
b8400f01 297 GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
b8400f01 298
ac52da6a
DCS
299 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
300 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
301
bbb8a9d7 302 if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
b46a33e2
TU
303 return -EINVAL;
304
bbb8a9d7 305 if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
b46a33e2
TU
306 return -EINVAL;
307
bbb8a9d7 308 if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
b46a33e2
TU
309 return -EINVAL;
310
3b3f1650
AG
311 GEM_BUG_ON(dev_priv->engine[id]);
312 engine = kzalloc(sizeof(*engine), GFP_KERNEL);
313 if (!engine)
314 return -ENOMEM;
88d2ba2e
TU
315
316 engine->id = id;
317 engine->i915 = dev_priv;
74419daa 318 __sprint_engine_name(engine->name, info);
5ec2cf7e 319 engine->hw_id = engine->guc_id = info->hw_id;
80b216b9 320 engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
0908180b
DCS
321 engine->class = info->class;
322 engine->instance = info->instance;
88d2ba2e 323
1803fcbc 324 engine->uabi_id = info->uabi_id;
74419daa 325 engine->uabi_class = intel_engine_classes[info->class].uabi_class;
1803fcbc 326
63ffbcda
JL
327 engine->context_size = __intel_engine_context_size(dev_priv,
328 engine->class);
329 if (WARN_ON(engine->context_size > BIT(20)))
330 engine->context_size = 0;
481827b4
CW
331 if (engine->context_size)
332 DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
63ffbcda 333
0de9136d
CW
334 /* Nothing to do here, execute in order of dependencies */
335 engine->schedule = NULL;
336
741258cd 337 seqlock_init(&engine->stats.lock);
30e17b78 338
3fc03069
CD
339 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
340
060f2322
CW
341 /* Scrub mmio state on takeover */
342 intel_engine_sanitize_mmio(engine);
343
b46a33e2 344 dev_priv->engine_class[info->class][info->instance] = engine;
3b3f1650
AG
345 dev_priv->engine[id] = engine;
346 return 0;
88d2ba2e
TU
347}
348
/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
63ffbcda 355int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
88d2ba2e 356{
c1bb1145 357 struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
5f9be054 358 const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
3b3f1650
AG
359 struct intel_engine_cs *engine;
360 enum intel_engine_id id;
5f9be054 361 unsigned int mask = 0;
88d2ba2e 362 unsigned int i;
bb8f0f5a 363 int err;
88d2ba2e 364
70006ad6
TU
365 WARN_ON(ring_mask == 0);
366 WARN_ON(ring_mask &
74f6e183 367 GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
88d2ba2e 368
645ff9e3
MW
369 if (i915_inject_load_failure())
370 return -ENODEV;
371
88d2ba2e
TU
372 for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
373 if (!HAS_ENGINE(dev_priv, i))
374 continue;
375
bb8f0f5a
CW
376 err = intel_engine_setup(dev_priv, i);
377 if (err)
378 goto cleanup;
379
380 mask |= ENGINE_MASK(i);
381 }
382
383 /*
384 * Catch failures to update intel_engines table when the new engines
385 * are added to the driver by a warning and disabling the forgotten
386 * engines.
387 */
388 if (WARN_ON(mask != ring_mask))
389 device_info->ring_mask = mask;
390
5f9be054
CW
391 /* We always presume we have at least RCS available for later probing */
392 if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
393 err = -ENODEV;
394 goto cleanup;
395 }
396
0258404f 397 RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
bb8f0f5a 398
ce453b3e
MT
399 i915_check_and_clear_faults(dev_priv);
400
bb8f0f5a
CW
401 return 0;
402
403cleanup:
404 for_each_engine(engine, dev_priv, id)
405 kfree(engine);
406 return err;
407}
408
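/*
 * Usage sketch (editor's illustration; the exact call sites are an
 * assumption, they live outside this file): the mmio/init split mirrors
 * the driver probe sequence, roughly
 *
 *	err = intel_engines_init_mmio(dev_priv);
 *	if (err)
 *		return err;
 *	...
 *	err = intel_engines_init(dev_priv);
 *
 * with the cleanup: paths above unwinding the engine allocations on failure.
 */
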
/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
415int intel_engines_init(struct drm_i915_private *dev_priv)
416{
bb8f0f5a
CW
417 struct intel_engine_cs *engine;
418 enum intel_engine_id id, err_id;
33def1ff 419 int err;
bb8f0f5a
CW
420
421 for_each_engine(engine, dev_priv, id) {
b8400f01
OM
422 const struct engine_class_info *class_info =
423 &intel_engine_classes[engine->class];
bb8f0f5a
CW
424 int (*init)(struct intel_engine_cs *engine);
425
fb5c551a 426 if (HAS_EXECLISTS(dev_priv))
b8400f01 427 init = class_info->init_execlists;
88d2ba2e 428 else
b8400f01 429 init = class_info->init_legacy;
33def1ff
TU
430
431 err = -EINVAL;
432 err_id = id;
433
bbb8a9d7 434 if (GEM_DEBUG_WARN_ON(!init))
33def1ff 435 goto cleanup;
88d2ba2e 436
bb8f0f5a 437 err = init(engine);
33def1ff 438 if (err)
88d2ba2e
TU
439 goto cleanup;
440
ff44ad51 441 GEM_BUG_ON(!engine->submit_request);
88d2ba2e
TU
442 }
443
88d2ba2e
TU
444 return 0;
445
446cleanup:
3b3f1650 447 for_each_engine(engine, dev_priv, id) {
33def1ff 448 if (id >= err_id) {
bb8f0f5a 449 kfree(engine);
33def1ff
TU
450 dev_priv->engine[id] = NULL;
451 } else {
8ee7c6e2 452 dev_priv->gt.cleanup_engine(engine);
33def1ff 453 }
88d2ba2e 454 }
bb8f0f5a 455 return err;
88d2ba2e
TU
456}
457
6faf5916 458void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
57f275a2 459{
57f275a2 460 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
73cb9701 461
57f275a2
CW
462 /* After manually advancing the seqno, fake the interrupt in case
463 * there are any waiters for that seqno.
464 */
465 intel_engine_wakeup(engine);
2ca9faa5
CW
466
467 GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
57f275a2
CW
468}
469
c5781351
MW
470static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
471{
472 i915_gem_batch_pool_init(&engine->batch_pool, engine);
473}
474
19df9a57
MK
475static void intel_engine_init_execlist(struct intel_engine_cs *engine)
476{
477 struct intel_engine_execlists * const execlists = &engine->execlists;
478
76e70087 479 execlists->port_mask = 1;
410ed573 480 GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
76e70087
MK
481 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
482
4d97cbe0 483 execlists->queue_priority_hint = INT_MIN;
655250a8 484 execlists->queue = RB_ROOT_CACHED;
19df9a57
MK
485}
486
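/*
 * Editor's note (not part of the original file): queue_priority_hint is
 * initialised to INT_MIN as a "nothing queued" sentinel; submission code
 * compares the priority of an incoming request against this hint to decide
 * whether the tasklet needs kicking, and intel_engines_park() later in this
 * file asserts it has returned to INT_MIN before the engine is allowed to
 * sleep.
 */
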
a0e731f4 487static void cleanup_status_page(struct intel_engine_cs *engine)
486e93f7 488{
0ca88ba0
CW
489 struct i915_vma *vma;
490
060f2322
CW
491 /* Prevent writes into HWSP after returning the page to the system */
492 intel_engine_set_hwsp_writemask(engine, ~0u);
493
0ca88ba0
CW
494 vma = fetch_and_zero(&engine->status_page.vma);
495 if (!vma)
496 return;
486e93f7 497
0ca88ba0
CW
498 if (!HWS_NEEDS_PHYSICAL(engine->i915))
499 i915_vma_unpin(vma);
500
501 i915_gem_object_unpin_map(vma->obj);
502 __i915_gem_object_release_unless_active(vma->obj);
503}
504
505static int pin_ggtt_status_page(struct intel_engine_cs *engine,
506 struct i915_vma *vma)
507{
508 unsigned int flags;
509
510 flags = PIN_GLOBAL;
511 if (!HAS_LLC(engine->i915))
512 /*
513 * On g33, we cannot place HWS above 256MiB, so
514 * restrict its pinning to the low mappable arena.
515 * Though this restriction is not documented for
516 * gen4, gen5, or byt, they also behave similarly
517 * and hang if the HWS is placed at the top of the
518 * GTT. To generalise, it appears that all !llc
519 * platforms have issues with us placing the HWS
520 * above the mappable region (even though we never
521 * actually map it).
522 */
523 flags |= PIN_MAPPABLE;
524 else
525 flags |= PIN_HIGH;
486e93f7 526
0ca88ba0 527 return i915_vma_pin(vma, 0, 0, flags);
486e93f7
DCS
528}
529
530static int init_status_page(struct intel_engine_cs *engine)
531{
532 struct drm_i915_gem_object *obj;
533 struct i915_vma *vma;
486e93f7
DCS
534 void *vaddr;
535 int ret;
536
0ca88ba0
CW
537 /*
538 * Though the HWS register does support 36bit addresses, historically
539 * we have had hangs and corruption reported due to wild writes if
540 * the HWS is placed above 4G. We only allow objects to be allocated
541 * in GFP_DMA32 for i965, and no earlier physical address users had
542 * access to more than 4G.
543 */
486e93f7
DCS
544 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
545 if (IS_ERR(obj)) {
546 DRM_ERROR("Failed to allocate status page\n");
547 return PTR_ERR(obj);
548 }
549
550 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
551 if (ret)
552 goto err;
553
82ad6443 554 vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
486e93f7
DCS
555 if (IS_ERR(vma)) {
556 ret = PTR_ERR(vma);
557 goto err;
558 }
559
486e93f7
DCS
560 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
561 if (IS_ERR(vaddr)) {
562 ret = PTR_ERR(vaddr);
0ca88ba0 563 goto err;
486e93f7
DCS
564 }
565
0ca88ba0 566 engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
486e93f7 567 engine->status_page.vma = vma;
0ca88ba0
CW
568
569 if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
570 ret = pin_ggtt_status_page(engine, vma);
571 if (ret)
572 goto err_unpin;
573 }
574
486e93f7
DCS
575 return 0;
576
577err_unpin:
0ca88ba0 578 i915_gem_object_unpin_map(obj);
486e93f7
DCS
579err:
580 i915_gem_object_put(obj);
581 return ret;
582}
583
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
593int intel_engine_setup_common(struct intel_engine_cs *engine)
594{
595 int err;
596
597 err = init_status_page(engine);
598 if (err)
599 return err;
600
601 err = i915_timeline_init(engine->i915,
602 &engine->timeline,
603 engine->name,
604 engine->status_page.vma);
605 if (err)
606 goto err_hwsp;
607
608 i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
609
610 intel_engine_init_execlist(engine);
611 intel_engine_init_hangcheck(engine);
612 intel_engine_init_batch_pool(engine);
613 intel_engine_init_cmd_parser(engine);
614
615 return 0;
616
617err_hwsp:
618 cleanup_status_page(engine);
619 return err;
620}
621
1fc44d9b
CW
622static void __intel_context_unpin(struct i915_gem_context *ctx,
623 struct intel_engine_cs *engine)
624{
625 intel_context_unpin(to_intel_context(ctx, engine));
626}
627
e1a73a54
CW
628struct measure_breadcrumb {
629 struct i915_request rq;
630 struct i915_timeline timeline;
631 struct intel_ring ring;
632 u32 cs[1024];
633};
634
9fa4973e 635static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
e1a73a54
CW
636{
637 struct measure_breadcrumb *frame;
52954edd 638 int dw = -ENOMEM;
e1a73a54
CW
639
640 GEM_BUG_ON(!engine->i915->gt.scratch);
641
642 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
643 if (!frame)
644 return -ENOMEM;
645
52954edd
CW
646 if (i915_timeline_init(engine->i915,
647 &frame->timeline, "measure",
648 engine->status_page.vma))
649 goto out_frame;
e1a73a54
CW
650
651 INIT_LIST_HEAD(&frame->ring.request_list);
652 frame->ring.timeline = &frame->timeline;
653 frame->ring.vaddr = frame->cs;
654 frame->ring.size = sizeof(frame->cs);
655 frame->ring.effective_size = frame->ring.size;
656 intel_ring_update_space(&frame->ring);
657
658 frame->rq.i915 = engine->i915;
659 frame->rq.engine = engine;
660 frame->rq.ring = &frame->ring;
661 frame->rq.timeline = &frame->timeline;
662
5013eb8c
CW
663 dw = i915_timeline_pin(&frame->timeline);
664 if (dw < 0)
665 goto out_timeline;
666
85474441 667 dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
e1a73a54 668
5013eb8c 669 i915_timeline_unpin(&frame->timeline);
e1a73a54 670
5013eb8c
CW
671out_timeline:
672 i915_timeline_fini(&frame->timeline);
52954edd
CW
673out_frame:
674 kfree(frame);
e1a73a54
CW
675 return dw;
676}
677
/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
689int intel_engine_init_common(struct intel_engine_cs *engine)
690{
1fc44d9b
CW
691 struct drm_i915_private *i915 = engine->i915;
692 struct intel_context *ce;
019bf277
TU
693 int ret;
694
ff44ad51
CW
695 engine->set_default_submission(engine);
696
e8a9c58f
CW
697 /* We may need to do things with the shrinker which
698 * require us to immediately switch back to the default
699 * context. This can cause a problem as pinning the
700 * default context also requires GTT space which may not
701 * be available. To avoid this we always pin the default
702 * context.
703 */
1fc44d9b
CW
704 ce = intel_context_pin(i915->kernel_context, engine);
705 if (IS_ERR(ce))
706 return PTR_ERR(ce);
019bf277 707
e7af3116
CW
708 /*
709 * Similarly the preempt context must always be available so that
710 * we can interrupt the engine at any time.
711 */
1fc44d9b
CW
712 if (i915->preempt_context) {
713 ce = intel_context_pin(i915->preempt_context, engine);
714 if (IS_ERR(ce)) {
715 ret = PTR_ERR(ce);
e7af3116
CW
716 goto err_unpin_kernel;
717 }
718 }
719
e8a9c58f
CW
720 ret = intel_engine_init_breadcrumbs(engine);
721 if (ret)
e7af3116 722 goto err_unpin_preempt;
e8a9c58f 723
9fa4973e 724 ret = measure_breadcrumb_dw(engine);
e1a73a54 725 if (ret < 0)
52954edd 726 goto err_breadcrumbs;
e1a73a54 727
85474441 728 engine->emit_fini_breadcrumb_dw = ret;
e1a73a54 729
7756e454 730 return 0;
e8a9c58f 731
486e93f7
DCS
732err_breadcrumbs:
733 intel_engine_fini_breadcrumbs(engine);
e7af3116 734err_unpin_preempt:
1fc44d9b
CW
735 if (i915->preempt_context)
736 __intel_context_unpin(i915->preempt_context, engine);
737
e7af3116 738err_unpin_kernel:
1fc44d9b 739 __intel_context_unpin(i915->kernel_context, engine);
e8a9c58f 740 return ret;
019bf277 741}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
750void intel_engine_cleanup_common(struct intel_engine_cs *engine)
751{
1fc44d9b
CW
752 struct drm_i915_private *i915 = engine->i915;
753
a0e731f4 754 cleanup_status_page(engine);
486e93f7 755
96a945aa 756 intel_engine_fini_breadcrumbs(engine);
7756e454 757 intel_engine_cleanup_cmd_parser(engine);
96a945aa 758 i915_gem_batch_pool_fini(&engine->batch_pool);
e8a9c58f 759
d2b4b979
CW
760 if (engine->default_state)
761 i915_gem_object_put(engine->default_state);
762
1fc44d9b
CW
763 if (i915->preempt_context)
764 __intel_context_unpin(i915->preempt_context, engine);
765 __intel_context_unpin(i915->kernel_context, engine);
a89d1f92
CW
766
767 i915_timeline_fini(&engine->timeline);
4a15c75c 768
452420d2 769 intel_wa_list_free(&engine->ctx_wa_list);
4a15c75c 770 intel_wa_list_free(&engine->wa_list);
69bcdecf 771 intel_wa_list_free(&engine->whitelist);
96a945aa 772}
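
/*
 * Lifecycle sketch (editor's illustration; ordering inferred from the
 * kernel-doc above, the callers live outside this file):
 *
 *	intel_engine_setup_common(engine);	status page, timeline, execlists
 *	intel_engine_init_common(engine);	pins kernel/preempt contexts
 *	...
 *	intel_engine_cleanup_common(engine);	undoes both on teardown
 */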
1b36595f 773
3ceda3a4 774u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
1b36595f
CW
775{
776 struct drm_i915_private *dev_priv = engine->i915;
777 u64 acthd;
778
779 if (INTEL_GEN(dev_priv) >= 8)
780 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
781 RING_ACTHD_UDW(engine->mmio_base));
782 else if (INTEL_GEN(dev_priv) >= 4)
783 acthd = I915_READ(RING_ACTHD(engine->mmio_base));
784 else
785 acthd = I915_READ(ACTHD);
786
787 return acthd;
788}
789
3ceda3a4 790u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
1b36595f
CW
791{
792 struct drm_i915_private *dev_priv = engine->i915;
793 u64 bbaddr;
794
795 if (INTEL_GEN(dev_priv) >= 8)
796 bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
797 RING_BBADDR_UDW(engine->mmio_base));
798 else
799 bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
800
801 return bbaddr;
802}
0e704476 803
3f6e9822
CW
804int intel_engine_stop_cs(struct intel_engine_cs *engine)
805{
806 struct drm_i915_private *dev_priv = engine->i915;
807 const u32 base = engine->mmio_base;
808 const i915_reg_t mode = RING_MI_MODE(base);
809 int err;
810
811 if (INTEL_GEN(dev_priv) < 3)
812 return -ENODEV;
813
814 GEM_TRACE("%s\n", engine->name);
815
816 I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
817
818 err = 0;
819 if (__intel_wait_for_register_fw(dev_priv,
820 mode, MODE_IDLE, MODE_IDLE,
821 1000, 0,
822 NULL)) {
823 GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
824 err = -ETIMEDOUT;
825 }
826
827 /* A final mmio read to let GPU writes be hopefully flushed to memory */
828 POSTING_READ_FW(mode);
829
830 return err;
831}
832
a99b32a6
CW
833void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
834{
835 struct drm_i915_private *dev_priv = engine->i915;
836
837 GEM_TRACE("%s\n", engine->name);
838
839 I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
840 _MASKED_BIT_DISABLE(STOP_RING));
841}
842
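/*
 * Editor's note (illustrative; the callers are in the reset code outside
 * this file): the two helpers above bracket an engine reset - STOP_RING is
 * set to freeze the command streamer before the reset is issued, and the
 * masked bit is cleared again once the engine may resume, roughly
 *
 *	err = intel_engine_stop_cs(engine);
 *	... reset the engine ...
 *	intel_engine_cancel_stop_cs(engine);
 */
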
0e704476
CW
843const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
844{
845 switch (type) {
846 case I915_CACHE_NONE: return " uncached";
847 case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
848 case I915_CACHE_L3_LLC: return " L3+LLC";
849 case I915_CACHE_WT: return " WT";
850 default: return "";
851 }
852}
853
1e40d4ae
YZ
854u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
855{
0258404f 856 const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
1e40d4ae
YZ
857 u32 mcr_s_ss_select;
858 u32 slice = fls(sseu->slice_mask);
859 u32 subslice = fls(sseu->subslice_mask[slice]);
860
cf819eff 861 if (IS_GEN(dev_priv, 10))
1e40d4ae
YZ
862 mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
863 GEN8_MCR_SUBSLICE(subslice);
d78fa508
YZ
864 else if (INTEL_GEN(dev_priv) >= 11)
865 mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
866 GEN11_MCR_SUBSLICE(subslice);
1e40d4ae
YZ
867 else
868 mcr_s_ss_select = 0;
869
870 return mcr_s_ss_select;
871}
872
739f3abd 873static inline u32
0e704476
CW
874read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
875 int subslice, i915_reg_t reg)
876{
739f3abd
JN
877 u32 mcr_slice_subslice_mask;
878 u32 mcr_slice_subslice_select;
879 u32 default_mcr_s_ss_select;
880 u32 mcr;
881 u32 ret;
0e704476
CW
882 enum forcewake_domains fw_domains;
883
d3d57927
KG
884 if (INTEL_GEN(dev_priv) >= 11) {
885 mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
886 GEN11_MCR_SUBSLICE_MASK;
887 mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
888 GEN11_MCR_SUBSLICE(subslice);
889 } else {
890 mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
891 GEN8_MCR_SUBSLICE_MASK;
892 mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
893 GEN8_MCR_SUBSLICE(subslice);
894 }
895
1e40d4ae
YZ
896 default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
897
0e704476
CW
898 fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
899 FW_REG_READ);
900 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
901 GEN8_MCR_SELECTOR,
902 FW_REG_READ | FW_REG_WRITE);
903
904 spin_lock_irq(&dev_priv->uncore.lock);
905 intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
906
907 mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
1e40d4ae
YZ
908
909 WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
910 default_mcr_s_ss_select);
911
d3d57927
KG
912 mcr &= ~mcr_slice_subslice_mask;
913 mcr |= mcr_slice_subslice_select;
0e704476
CW
914 I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
915
916 ret = I915_READ_FW(reg);
917
d3d57927 918 mcr &= ~mcr_slice_subslice_mask;
1e40d4ae
YZ
919 mcr |= default_mcr_s_ss_select;
920
0e704476
CW
921 I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
922
923 intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
924 spin_unlock_irq(&dev_priv->uncore.lock);
925
926 return ret;
927}
928
929/* NB: please notice the memset */
930void intel_engine_get_instdone(struct intel_engine_cs *engine,
931 struct intel_instdone *instdone)
932{
933 struct drm_i915_private *dev_priv = engine->i915;
934 u32 mmio_base = engine->mmio_base;
935 int slice;
936 int subslice;
937
938 memset(instdone, 0, sizeof(*instdone));
939
940 switch (INTEL_GEN(dev_priv)) {
941 default:
942 instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
943
944 if (engine->id != RCS)
945 break;
946
947 instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
948 for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
949 instdone->sampler[slice][subslice] =
950 read_subslice_reg(dev_priv, slice, subslice,
951 GEN7_SAMPLER_INSTDONE);
952 instdone->row[slice][subslice] =
953 read_subslice_reg(dev_priv, slice, subslice,
954 GEN7_ROW_INSTDONE);
955 }
956 break;
957 case 7:
958 instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
959
960 if (engine->id != RCS)
961 break;
962
963 instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
964 instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
965 instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
966
967 break;
968 case 6:
969 case 5:
970 case 4:
971 instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
972
973 if (engine->id == RCS)
974 /* HACK: Using the wrong struct member */
975 instdone->slice_common = I915_READ(GEN4_INSTDONE1);
976 break;
977 case 3:
978 case 2:
979 instdone->instdone = I915_READ(GEN2_INSTDONE);
980 break;
981 }
982}
f97fbf96 983
a091d4ee
CW
984static bool ring_is_idle(struct intel_engine_cs *engine)
985{
986 struct drm_i915_private *dev_priv = engine->i915;
538ef96b 987 intel_wakeref_t wakeref;
a091d4ee
CW
988 bool idle = true;
989
293f8c0f
CW
990 if (I915_SELFTEST_ONLY(!engine->mmio_base))
991 return true;
992
74d00d28 993 /* If the whole device is asleep, the engine must be idle */
538ef96b
CW
994 wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
995 if (!wakeref)
74d00d28 996 return true;
a091d4ee 997
aed2fc10
CW
998 /* First check that no commands are left in the ring */
999 if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
1000 (I915_READ_TAIL(engine) & TAIL_ADDR))
1001 idle = false;
1002
a091d4ee
CW
1003 /* No bit for gen2, so assume the CS parser is idle */
1004 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
1005 idle = false;
1006
538ef96b 1007 intel_runtime_pm_put(dev_priv, wakeref);
a091d4ee
CW
1008
1009 return idle;
1010}
1011
/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and the engine is idle.
 */
1019bool intel_engine_is_idle(struct intel_engine_cs *engine)
1020{
1021 struct drm_i915_private *dev_priv = engine->i915;
1022
a8e9a419
CW
1023 /* More white lies, if wedged, hw state is inconsistent */
1024 if (i915_terminally_wedged(&dev_priv->gpu_error))
1025 return true;
1026
5400367a 1027 /* Any inflight/incomplete requests? */
97f06158 1028 if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
5400367a
CW
1029 return false;
1030
4a118ecb 1031 /* Waiting to drain ELSP? */
dd0cf235 1032 if (READ_ONCE(engine->execlists.active)) {
9701975e 1033 struct tasklet_struct *t = &engine->execlists.tasklet;
dd0cf235 1034
26eb4cd6 1035 local_bh_disable();
9701975e
CW
1036 if (tasklet_trylock(t)) {
1037 /* Must wait for any GPU reset in progress. */
1038 if (__tasklet_is_enabled(t))
1039 t->func(t->data);
1040 tasklet_unlock(t);
dd0cf235 1041 }
26eb4cd6 1042 local_bh_enable();
dd0cf235 1043
22495b68
CW
1044 /* Otherwise flush the tasklet if it was on another cpu */
1045 tasklet_unlock_wait(t);
1046
9701975e 1047 if (READ_ONCE(engine->execlists.active))
dd0cf235
CW
1048 return false;
1049 }
5400367a 1050
dd0cf235 1051 /* ELSP is empty, but there are ready requests? E.g. after reset */
655250a8 1052 if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
d6edb6e3
CW
1053 return false;
1054
5400367a 1055 /* Ring stopped? */
293f8c0f 1056 return ring_is_idle(engine);
5400367a
CW
1057}
1058
05425249
CW
1059bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
1060{
1061 struct intel_engine_cs *engine;
1062 enum intel_engine_id id;
1063
d7dc4131
CW
1064 /*
1065 * If the driver is wedged, HW state may be very inconsistent and
8490ae20
CW
1066 * report that it is still busy, even though we have stopped using it.
1067 */
1068 if (i915_terminally_wedged(&dev_priv->gpu_error))
1069 return true;
1070
05425249
CW
1071 for_each_engine(engine, dev_priv, id) {
1072 if (!intel_engine_is_idle(engine))
1073 return false;
1074 }
1075
1076 return true;
1077}
1078
/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context to be executed on this engine (or, if the
 * engine is already idle, the last context to have been executed) is the
 * kernel context (#i915.kernel_context).
 */
20ccd4d3
CW
1087bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
1088{
1fc44d9b
CW
1089 const struct intel_context *kernel_context =
1090 to_intel_context(engine->i915->kernel_context, engine);
e61e0f51 1091 struct i915_request *rq;
ae6c4574
CW
1092
1093 lockdep_assert_held(&engine->i915->drm.struct_mutex);
1094
1095 /*
1096 * Check the last context seen by the engine. If active, it will be
1097 * the last request that remains in the timeline. When idle, it is
1098 * the last executed context as tracked by retirement.
1099 */
a89d1f92 1100 rq = __i915_gem_active_peek(&engine->timeline.last_request);
ae6c4574 1101 if (rq)
1fc44d9b 1102 return rq->hw_context == kernel_context;
ae6c4574
CW
1103 else
1104 return engine->last_retired_context == kernel_context;
20ccd4d3
CW
1105}
1106
ff44ad51
CW
1107void intel_engines_reset_default_submission(struct drm_i915_private *i915)
1108{
1109 struct intel_engine_cs *engine;
1110 enum intel_engine_id id;
1111
1112 for_each_engine(engine, i915, id)
1113 engine->set_default_submission(engine);
1114}
1115
55277e1f
CW
1116static bool reset_engines(struct drm_i915_private *i915)
1117{
1118 if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
1119 return false;
1120
1121 return intel_gpu_reset(i915, ALL_ENGINES) == 0;
1122}
1123
4fdd5b4e
CW
1124/**
1125 * intel_engines_sanitize: called after the GPU has lost power
1126 * @i915: the i915 device
55277e1f 1127 * @force: ignore a failed reset and sanitize engine state anyway
4fdd5b4e
CW
1128 *
1129 * Anytime we reset the GPU, either with an explicit GPU reset or through a
1130 * PCI power cycle, the GPU loses state and we must reset our state tracking
1131 * to match. Note that calling intel_engines_sanitize() if the GPU has not
1132 * been reset results in much confusion!
1133 */
55277e1f 1134void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
4fdd5b4e
CW
1135{
1136 struct intel_engine_cs *engine;
1137 enum intel_engine_id id;
1138
1139 GEM_TRACE("\n");
1140
55277e1f
CW
1141 if (!reset_engines(i915) && !force)
1142 return;
1143
eb8d0f5a
CW
1144 for_each_engine(engine, i915, id)
1145 intel_engine_reset(engine, false);
4fdd5b4e
CW
1146}
1147
aba5e278
CW
1148/**
1149 * intel_engines_park: called when the GT is transitioning from busy->idle
1150 * @i915: the i915 device
1151 *
1152 * The GT is now idle and about to go to sleep (maybe never to wake again?).
1153 * Time for us to tidy and put away our toys (release resources back to the
1154 * system).
1155 */
1156void intel_engines_park(struct drm_i915_private *i915)
6c067579
CW
1157{
1158 struct intel_engine_cs *engine;
1159 enum intel_engine_id id;
1160
1161 for_each_engine(engine, i915, id) {
820c5bbb
CW
1162 /* Flush the residual irq tasklets first. */
1163 intel_engine_disarm_breadcrumbs(engine);
c6dce8f1 1164 tasklet_kill(&engine->execlists.tasklet);
820c5bbb 1165
3265124a
CW
1166 /*
1167 * We are committed now to parking the engines, make sure there
1168 * will be no more interrupts arriving later and the engines
1169 * are truly idle.
1170 */
30b29406 1171 if (wait_for(intel_engine_is_idle(engine), 10)) {
3265124a
CW
1172 struct drm_printer p = drm_debug_printer(__func__);
1173
30b29406
CW
1174 dev_err(i915->drm.dev,
1175 "%s is not idle before parking\n",
1176 engine->name);
0db18b17 1177 intel_engine_dump(engine, &p, NULL);
3265124a
CW
1178 }
1179
15c83c43 1180 /* Must be reset upon idling, or we may miss the busy wakeup. */
4d97cbe0 1181 GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
15c83c43 1182
aba5e278
CW
1183 if (engine->park)
1184 engine->park(engine);
1185
fe0c4935
CW
1186 if (engine->pinned_default_state) {
1187 i915_gem_object_unpin_map(engine->default_state);
1188 engine->pinned_default_state = NULL;
1189 }
1190
aba5e278 1191 i915_gem_batch_pool_fini(&engine->batch_pool);
b620e870 1192 engine->execlists.no_priolist = false;
6c067579
CW
1193 }
1194}
1195
aba5e278
CW
1196/**
1197 * intel_engines_unpark: called when the GT is transitioning from idle->busy
1198 * @i915: the i915 device
1199 *
1200 * The GT was idle and now about to fire up with some new user requests.
1201 */
1202void intel_engines_unpark(struct drm_i915_private *i915)
1203{
1204 struct intel_engine_cs *engine;
1205 enum intel_engine_id id;
1206
1207 for_each_engine(engine, i915, id) {
fe0c4935
CW
1208 void *map;
1209
1210 /* Pin the default state for fast resets from atomic context. */
1211 map = NULL;
1212 if (engine->default_state)
1213 map = i915_gem_object_pin_map(engine->default_state,
1214 I915_MAP_WB);
1215 if (!IS_ERR_OR_NULL(map))
1216 engine->pinned_default_state = map;
1217
aba5e278
CW
1218 if (engine->unpark)
1219 engine->unpark(engine);
e21b1413
CW
1220
1221 intel_engine_init_hangcheck(engine);
aba5e278
CW
1222 }
1223}
1224
01278cb1
CW
1225/**
1226 * intel_engine_lost_context: called when the GPU is reset into unknown state
1227 * @engine: the engine
1228 *
1229 * We have either reset the GPU or otherwise about to lose state tracking of
1230 * the current GPU logical state (e.g. suspend). On next use, it is therefore
1231 * imperative that we make no presumptions about the current state and load
1232 * from scratch.
1233 */
1234void intel_engine_lost_context(struct intel_engine_cs *engine)
1235{
1fc44d9b 1236 struct intel_context *ce;
01278cb1
CW
1237
1238 lockdep_assert_held(&engine->i915->drm.struct_mutex);
1239
1fc44d9b
CW
1240 ce = fetch_and_zero(&engine->last_retired_context);
1241 if (ce)
1242 intel_context_unpin(ce);
01278cb1
CW
1243}
1244
90cad095
CW
1245bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
1246{
1247 switch (INTEL_GEN(engine->i915)) {
1248 case 2:
1249 return false; /* uses physical not virtual addresses */
1250 case 3:
1251 /* maybe only uses physical not virtual addresses */
1252 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
1253 case 6:
1254 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
1255 default:
1256 return true;
1257 }
1258}
1259
d2b4b979
CW
1260unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
1261{
1262 struct intel_engine_cs *engine;
1263 enum intel_engine_id id;
1264 unsigned int which;
1265
1266 which = 0;
1267 for_each_engine(engine, i915, id)
1268 if (engine->default_state)
1269 which |= BIT(engine->uabi_class);
1270
1271 return which;
1272}
1273
247870ac
CW
1274static int print_sched_attr(struct drm_i915_private *i915,
1275 const struct i915_sched_attr *attr,
1276 char *buf, int x, int len)
b7268c5e
CW
1277{
1278 if (attr->priority == I915_PRIORITY_INVALID)
247870ac
CW
1279 return x;
1280
1281 x += snprintf(buf + x, len - x,
1282 " prio=%d", attr->priority);
b7268c5e 1283
247870ac 1284 return x;
b7268c5e
CW
1285}
1286
f636edb2 1287static void print_request(struct drm_printer *m,
e61e0f51 1288 struct i915_request *rq,
f636edb2
CW
1289 const char *prefix)
1290{
ab268151 1291 const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
96d4f03c 1292 char buf[80] = "";
247870ac
CW
1293 int x = 0;
1294
1295 x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
ab268151 1296
b312d8ca 1297 drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n",
b7268c5e 1298 prefix,
a27d5a44 1299 rq->global_seqno,
85474441
CW
1300 i915_request_completed(rq) ? "!" :
1301 i915_request_started(rq) ? "*" :
1302 "",
247870ac
CW
1303 rq->fence.context, rq->fence.seqno,
1304 buf,
f636edb2 1305 jiffies_to_msecs(jiffies - rq->emitted_jiffies),
ab268151 1306 name);
f636edb2
CW
1307}
1308
c1bf2728
CW
1309static void hexdump(struct drm_printer *m, const void *buf, size_t len)
1310{
1311 const size_t rowsize = 8 * sizeof(u32);
1312 const void *prev = NULL;
1313 bool skip = false;
1314 size_t pos;
1315
1316 for (pos = 0; pos < len; pos += rowsize) {
1317 char line[128];
1318
1319 if (prev && !memcmp(prev, buf + pos, rowsize)) {
1320 if (!skip) {
1321 drm_printf(m, "*\n");
1322 skip = true;
1323 }
1324 continue;
1325 }
1326
1327 WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
1328 rowsize, sizeof(u32),
1329 line, sizeof(line),
1330 false) >= sizeof(line));
286e6153 1331 drm_printf(m, "[%04zx] %s\n", pos, line);
c1bf2728
CW
1332
1333 prev = buf + pos;
1334 skip = false;
1335 }
1336}
1337
3ceda3a4
CW
1338static void intel_engine_print_registers(const struct intel_engine_cs *engine,
1339 struct drm_printer *m)
f636edb2 1340{
f636edb2 1341 struct drm_i915_private *dev_priv = engine->i915;
3ceda3a4
CW
1342 const struct intel_engine_execlists * const execlists =
1343 &engine->execlists;
f636edb2
CW
1344 u64 addr;
1345
00690008 1346 if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
e62230de 1347 drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
3ceda3a4
CW
1348 drm_printf(m, "\tRING_START: 0x%08x\n",
1349 I915_READ(RING_START(engine->mmio_base)));
1350 drm_printf(m, "\tRING_HEAD: 0x%08x\n",
1351 I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
1352 drm_printf(m, "\tRING_TAIL: 0x%08x\n",
1353 I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
3c75de5b 1354 drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
f636edb2 1355 I915_READ(RING_CTL(engine->mmio_base)),
3c75de5b
CW
1356 I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
1357 if (INTEL_GEN(engine->i915) > 2) {
1358 drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
1359 I915_READ(RING_MI_MODE(engine->mmio_base)),
1360 I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
1361 }
3ceda3a4
CW
1362
1363 if (INTEL_GEN(dev_priv) >= 6) {
1364 drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
1365 }
1366
f636edb2
CW
1367 addr = intel_engine_get_active_head(engine);
1368 drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
1369 upper_32_bits(addr), lower_32_bits(addr));
1370 addr = intel_engine_get_last_batch_head(engine);
1371 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
1372 upper_32_bits(addr), lower_32_bits(addr));
a0cf5790
CW
1373 if (INTEL_GEN(dev_priv) >= 8)
1374 addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
1375 RING_DMA_FADD_UDW(engine->mmio_base));
1376 else if (INTEL_GEN(dev_priv) >= 4)
1377 addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
1378 else
1379 addr = I915_READ(DMA_FADD_I8XX);
1380 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
1381 upper_32_bits(addr), lower_32_bits(addr));
1382 if (INTEL_GEN(dev_priv) >= 4) {
1383 drm_printf(m, "\tIPEIR: 0x%08x\n",
1384 I915_READ(RING_IPEIR(engine->mmio_base)));
1385 drm_printf(m, "\tIPEHR: 0x%08x\n",
1386 I915_READ(RING_IPEHR(engine->mmio_base)));
1387 } else {
1388 drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
1389 drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
1390 }
f636edb2 1391
fb5c551a 1392 if (HAS_EXECLISTS(dev_priv)) {
0ca88ba0
CW
1393 const u32 *hws =
1394 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
f636edb2 1395 unsigned int idx;
df4f94e8 1396 u8 read, write;
f636edb2
CW
1397
1398 drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
1399 I915_READ(RING_EXECLIST_STATUS_LO(engine)),
1400 I915_READ(RING_EXECLIST_STATUS_HI(engine)));
1401
df4f94e8
CW
1402 read = execlists->csb_head;
1403 write = READ_ONCE(*execlists->csb_write);
1404
1405 drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
1406 read, write,
1407 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
90408713
CW
1408 yesno(test_bit(TASKLET_STATE_SCHED,
1409 &engine->execlists.tasklet.state)),
1410 enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
f636edb2
CW
1411 if (read >= GEN8_CSB_ENTRIES)
1412 read = 0;
1413 if (write >= GEN8_CSB_ENTRIES)
1414 write = 0;
1415 if (read > write)
1416 write += GEN8_CSB_ENTRIES;
1417 while (read < write) {
1418 idx = ++read % GEN8_CSB_ENTRIES;
df4f94e8 1419 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
f636edb2 1420 idx,
f636edb2 1421 hws[idx * 2],
df4f94e8
CW
1422 I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
1423 hws[idx * 2 + 1],
1424 I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
f636edb2
CW
1425 }
1426
1427 rcu_read_lock();
1428 for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
e61e0f51 1429 struct i915_request *rq;
f636edb2
CW
1430 unsigned int count;
1431
1432 rq = port_unpack(&execlists->port[idx], &count);
1433 if (rq) {
3ceda3a4
CW
1434 char hdr[80];
1435
e8a70cab 1436 snprintf(hdr, sizeof(hdr),
5013eb8c 1437 "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x}, rq: ",
3a068721 1438 idx, count,
5013eb8c
CW
1439 i915_ggtt_offset(rq->ring->vma),
1440 rq->timeline->hwsp_offset);
e8a70cab 1441 print_request(m, rq, hdr);
f636edb2 1442 } else {
e8a70cab 1443 drm_printf(m, "\t\tELSP[%d] idle\n", idx);
f636edb2
CW
1444 }
1445 }
4a118ecb 1446 drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
f636edb2 1447 rcu_read_unlock();
f636edb2
CW
1448 } else if (INTEL_GEN(dev_priv) > 6) {
1449 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
1450 I915_READ(RING_PP_DIR_BASE(engine)));
1451 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
1452 I915_READ(RING_PP_DIR_BASE_READ(engine)));
1453 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
1454 I915_READ(RING_PP_DIR_DCLV(engine)));
1455 }
3ceda3a4
CW
1456}
1457
83c31783
CW
1458static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
1459{
1460 void *ring;
1461 int size;
1462
1463 drm_printf(m,
1464 "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
1465 rq->head, rq->postfix, rq->tail,
1466 rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
1467 rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
1468
1469 size = rq->tail - rq->head;
1470 if (rq->tail < rq->head)
1471 size += rq->ring->size;
1472
1473 ring = kmalloc(size, GFP_ATOMIC);
1474 if (ring) {
1475 const void *vaddr = rq->ring->vaddr;
1476 unsigned int head = rq->head;
1477 unsigned int len = 0;
1478
1479 if (rq->tail < head) {
1480 len = rq->ring->size - head;
1481 memcpy(ring, vaddr + head, len);
1482 head = 0;
1483 }
1484 memcpy(ring + len, vaddr + head, size - len);
1485
1486 hexdump(m, ring, size);
1487 kfree(ring);
1488 }
1489}
1490
3ceda3a4
CW
1491void intel_engine_dump(struct intel_engine_cs *engine,
1492 struct drm_printer *m,
1493 const char *header, ...)
1494{
1495 struct intel_breadcrumbs * const b = &engine->breadcrumbs;
3ceda3a4 1496 struct i915_gpu_error * const error = &engine->i915->gpu_error;
0212bdef 1497 struct i915_request *rq;
538ef96b 1498 intel_wakeref_t wakeref;
d6d12ec0 1499 unsigned long flags;
3ceda3a4
CW
1500 struct rb_node *rb;
1501
1502 if (header) {
1503 va_list ap;
1504
1505 va_start(ap, header);
1506 drm_vprintf(m, header, &ap);
1507 va_end(ap);
1508 }
1509
1510 if (i915_terminally_wedged(&engine->i915->gpu_error))
1511 drm_printf(m, "*** WEDGED ***\n");
1512
52d7f16e 1513 drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
3ceda3a4
CW
1514 intel_engine_get_seqno(engine),
1515 intel_engine_last_submit(engine),
1516 engine->hangcheck.seqno,
52d7f16e 1517 jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
3ceda3a4
CW
1518 drm_printf(m, "\tReset count: %d (global %d)\n",
1519 i915_reset_engine_count(error, engine),
1520 i915_reset_count(error));
1521
1522 rcu_read_lock();
1523
1524 drm_printf(m, "\tRequests:\n");
1525
a89d1f92 1526 rq = list_first_entry(&engine->timeline.requests,
e61e0f51 1527 struct i915_request, link);
a89d1f92 1528 if (&rq->link != &engine->timeline.requests)
3ceda3a4
CW
1529 print_request(m, rq, "\t\tfirst ");
1530
a89d1f92 1531 rq = list_last_entry(&engine->timeline.requests,
e61e0f51 1532 struct i915_request, link);
a89d1f92 1533 if (&rq->link != &engine->timeline.requests)
3ceda3a4
CW
1534 print_request(m, rq, "\t\tlast ");
1535
1536 rq = i915_gem_find_active_request(engine);
1537 if (rq) {
1538 print_request(m, rq, "\t\tactive ");
83c31783 1539
ef5032a0 1540 drm_printf(m, "\t\tring->start: 0x%08x\n",
3ceda3a4 1541 i915_ggtt_offset(rq->ring->vma));
ef5032a0 1542 drm_printf(m, "\t\tring->head: 0x%08x\n",
3ceda3a4 1543 rq->ring->head);
ef5032a0 1544 drm_printf(m, "\t\tring->tail: 0x%08x\n",
3ceda3a4 1545 rq->ring->tail);
ef5032a0
CW
1546 drm_printf(m, "\t\tring->emit: 0x%08x\n",
1547 rq->ring->emit);
1548 drm_printf(m, "\t\tring->space: 0x%08x\n",
1549 rq->ring->space);
5013eb8c
CW
1550 drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
1551 rq->timeline->hwsp_offset);
83c31783
CW
1552
1553 print_request_ring(m, rq);
3ceda3a4
CW
1554 }
1555
1556 rcu_read_unlock();
1557
538ef96b
CW
1558 wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
1559 if (wakeref) {
3ceda3a4 1560 intel_engine_print_registers(engine, m);
538ef96b 1561 intel_runtime_pm_put(engine->i915, wakeref);
3ceda3a4
CW
1562 } else {
1563 drm_printf(m, "\tDevice is asleep; skipping register dump\n");
1564 }
f636edb2 1565
0212bdef 1566 intel_execlists_show_requests(engine, m, print_request, 8);
a27d5a44 1567
0212bdef 1568 spin_lock_irqsave(&b->rb_lock, flags);
f636edb2
CW
1569 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1570 struct intel_wait *w = rb_entry(rb, typeof(*w), node);
1571
aa6a65da
CW
1572 drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
1573 w->tsk->comm, w->tsk->pid,
1574 task_state_to_char(w->tsk),
1575 w->seqno);
f636edb2 1576 }
0212bdef 1577 spin_unlock_irqrestore(&b->rb_lock, flags);
f636edb2 1578
c1bf2728 1579 drm_printf(m, "HWSP:\n");
0ca88ba0 1580 hexdump(m, engine->status_page.addr, PAGE_SIZE);
c1bf2728 1581
c400cc2a 1582 drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
f636edb2
CW
1583}
1584
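/*
 * Usage sketch (editor's illustration): callers supply a drm_printer to
 * direct the dump, as intel_engines_park() does earlier in this file with
 * drm_debug_printer(); the optional header is a printf-style format, e.g.
 *
 *	struct drm_printer p = drm_debug_printer(__func__);
 *
 *	intel_engine_dump(engine, &p, "%s\n", engine->name);
 */
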
b46a33e2
TU
1585static u8 user_class_map[] = {
1586 [I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
1587 [I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
1588 [I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
1589 [I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
1590};
1591
1592struct intel_engine_cs *
1593intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
1594{
1595 if (class >= ARRAY_SIZE(user_class_map))
1596 return NULL;
1597
1598 class = user_class_map[class];
1599
1600 GEM_BUG_ON(class > MAX_ENGINE_CLASS);
1601
1602 if (instance > MAX_ENGINE_INSTANCE)
1603 return NULL;
1604
1605 return i915->engine_class[class][instance];
1606}
1607
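/*
 * Editor's example (not part of the original file): user space names engines
 * by uabi (class, instance) pairs, which this helper maps through
 * user_class_map[] onto the internal classes, e.g.
 *
 *	engine = intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 1);
 *
 * returns i915->engine_class[VIDEO_DECODE_CLASS][1], or NULL if no such
 * engine exists.
 */
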
30e17b78
TU
1608/**
1609 * intel_enable_engine_stats() - Enable engine busy tracking on engine
1610 * @engine: engine to enable stats collection
1611 *
1612 * Start collecting the engine busyness data for @engine.
1613 *
1614 * Returns 0 on success or a negative error code.
1615 */
1616int intel_enable_engine_stats(struct intel_engine_cs *engine)
1617{
99e48bf9 1618 struct intel_engine_execlists *execlists = &engine->execlists;
30e17b78 1619 unsigned long flags;
99e48bf9 1620 int err = 0;
30e17b78 1621
cf669b4e 1622 if (!intel_engine_supports_stats(engine))
30e17b78
TU
1623 return -ENODEV;
1624
9512f985
CW
1625 spin_lock_irqsave(&engine->timeline.lock, flags);
1626 write_seqlock(&engine->stats.lock);
99e48bf9
CW
1627
1628 if (unlikely(engine->stats.enabled == ~0)) {
1629 err = -EBUSY;
1630 goto unlock;
1631 }
1632
4900727d 1633 if (engine->stats.enabled++ == 0) {
4900727d
CW
1634 const struct execlist_port *port = execlists->port;
1635 unsigned int num_ports = execlists_num_ports(execlists);
1636
30e17b78 1637 engine->stats.enabled_at = ktime_get();
4900727d
CW
1638
1639 /* XXX submission method oblivious? */
1640 while (num_ports-- && port_isset(port)) {
1641 engine->stats.active++;
1642 port++;
1643 }
1644
1645 if (engine->stats.active)
1646 engine->stats.start = engine->stats.enabled_at;
1647 }
30e17b78 1648
99e48bf9 1649unlock:
9512f985
CW
1650 write_sequnlock(&engine->stats.lock);
1651 spin_unlock_irqrestore(&engine->timeline.lock, flags);
30e17b78 1652
99e48bf9 1653 return err;
30e17b78
TU
1654}
1655
1656static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
1657{
1658 ktime_t total = engine->stats.total;
1659
1660 /*
1661 * If the engine is executing something at the moment
1662 * add it to the total.
1663 */
1664 if (engine->stats.active)
1665 total = ktime_add(total,
1666 ktime_sub(ktime_get(), engine->stats.start));
1667
1668 return total;
1669}
1670
1671/**
1672 * intel_engine_get_busy_time() - Return current accumulated engine busyness
1673 * @engine: engine to report on
1674 *
1675 * Returns accumulated time @engine was busy since engine stats were enabled.
1676 */
1677ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
1678{
741258cd 1679 unsigned int seq;
30e17b78 1680 ktime_t total;
30e17b78 1681
741258cd
TU
1682 do {
1683 seq = read_seqbegin(&engine->stats.lock);
1684 total = __intel_engine_get_busy_time(engine);
1685 } while (read_seqretry(&engine->stats.lock, seq));
30e17b78
TU
1686
1687 return total;
1688}
1689
1690/**
1691 * intel_disable_engine_stats() - Disable engine busy tracking on engine
1692 * @engine: engine to disable stats collection
1693 *
1694 * Stops collecting the engine busyness data for @engine.
1695 */
1696void intel_disable_engine_stats(struct intel_engine_cs *engine)
1697{
1698 unsigned long flags;
1699
cf669b4e 1700 if (!intel_engine_supports_stats(engine))
30e17b78
TU
1701 return;
1702
741258cd 1703 write_seqlock_irqsave(&engine->stats.lock, flags);
30e17b78
TU
1704 WARN_ON_ONCE(engine->stats.enabled == 0);
1705 if (--engine->stats.enabled == 0) {
1706 engine->stats.total = __intel_engine_get_busy_time(engine);
1707 engine->stats.active = 0;
1708 }
741258cd 1709 write_sequnlock_irqrestore(&engine->stats.lock, flags);
30e17b78
TU
1710}
1711
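/*
 * Usage sketch (editor's illustration; the i915 PMU is the in-tree user,
 * the snippet itself is hypothetical): busyness is read as a delta between
 * two samples taken while stats collection is enabled, e.g.
 *
 *	intel_enable_engine_stats(engine);
 *	t0 = intel_engine_get_busy_time(engine);
 *	... run a workload ...
 *	t1 = intel_engine_get_busy_time(engine);
 *	busy_ns = ktime_to_ns(ktime_sub(t1, t0));
 *	intel_disable_engine_stats(engine);
 */
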
f97fbf96
CW
1712#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1713#include "selftests/mock_engine.c"
74419daa 1714#include "selftests/intel_engine_cs.c"
f97fbf96 1715#endif