2 * SPDX-License-Identifier: MIT
4 * Copyright © 2011-2012 Intel Corporation
8 * This file implements HW context support. On gen5+ a HW context consists of an
9 * opaque GPU object which is referenced at times of context saves and restores.
10 * With RC6 enabled, the context is also referenced as the GPU enters and exits
11 * RC6 (the GPU has its own internal power context, except on gen5). Though
12 * something like a context does exist for the media ring, the code only
13 * supports contexts for the render ring.
15 * In software, there is a distinction between contexts created by the user,
16 * and the default HW context. The default HW context is used by GPU clients
17 * that do not request setup of their own hardware context. The default
18 * context's state is never restored to help prevent programming errors. This
19 * would happen if a client ran and piggy-backed off another client's GPU state.
20 * The default context only exists to give the GPU some context to load as the
21 * current one, in order to invoke a save of the context we actually care about. In fact, the
22 * code could likely be constructed, albeit in a more complicated fashion, to
23 * never use the default context, though that limits the driver's ability to
24 * swap out and/or destroy other contexts.
26 * All other contexts are created as a request by the GPU client. These contexts
27 * store GPU state, and thus allow GPU clients to avoid re-emitting state (and
28 * potentially to query certain state) at any time. The kernel driver makes
29 * certain that the appropriate commands are inserted.
31 * The context life cycle is semi-complicated in that context BOs may live
32 * longer than the context itself because of the way the hardware and object
33 * tracking work. Below is a very crude representation of the state machine
34 * describing the context life.
35 *                                            refcount   pincount   active
36 * S0: initial state                              0          0        0
37 * S1: context created                            1          0        0
38 * S2: context is currently running               2          1        X
39 * S3: GPU referenced, but not current            2          0        1
40 * S4: context is current, but destroyed          1          1        0
41 * S5: like S3, but destroyed                     1          0        1
43 * The most common (but not all) transitions (sketched in code after this comment):
44 * S0->S1: client creates a context
45 * S1->S2: client submits an execbuf with the context
46 * S2->S3: another client submits an execbuf with its own context
47 * S3->S1: context object was retired
48 * S3->S2: client submits another execbuf
49 * S2->S4: context destroy called with current context
50 * S3->S5->S0: destroy path
51 * S4->S5->S0: destroy path on current context
53 * There are two confusing terms used above:
54 * The "current context" means the context which is currently running on the
55 * GPU. The GPU has loaded its state already and has stored away the gtt
56 * offset of the BO. The GPU is not actively referencing the data at this
57 * offset, but it will on the next context switch. The only way to avoid this
58 * is to do a GPU reset.
60 * An "active context' is one which was previously the "current context" and is
61 * on the active list waiting for the next context switch to occur. Until this
62 * happens, the object must remain at the same gtt offset. It is therefore
63 * possible to destroy a context while it is still active.
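/*
 * Illustrative sketch (editor's addition, not driver code) of the common
 * transitions above, expressed against helpers implemented in this file;
 * the execbuf steps are reduced to their refcount/pincount/active effect:
 *
 *	ctx = i915_gem_create_context(i915, pc);    S0->S1 (1/0/0)
 *	execbuf references and pins the context     S1->S2 (2/1/X)
 *	another context becomes current             S2->S3 (2/0/1)
 *	the context object is retired               S3->S1 (1/0/0)
 *	context_close() and the final put           S1->S0, freed
 */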
67 #include <linux/highmem.h>
68 #include <linux/log2.h>
69 #include <linux/nospec.h>
71 #include <drm/drm_cache.h>
72 #include <drm/drm_syncobj.h>
74 #include "gt/gen6_ppgtt.h"
75 #include "gt/intel_context.h"
76 #include "gt/intel_context_param.h"
77 #include "gt/intel_engine_heartbeat.h"
78 #include "gt/intel_engine_user.h"
79 #include "gt/intel_gpu_commands.h"
80 #include "gt/intel_ring.h"
82 #include "pxp/intel_pxp.h"
84 #include "i915_file_private.h"
85 #include "i915_gem_context.h"
86 #include "i915_trace.h"
87 #include "i915_user_extensions.h"
89 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
91 static struct kmem_cache *slab_luts;
93 struct i915_lut_handle *i915_lut_handle_alloc(void)
95 return kmem_cache_alloc(slab_luts, GFP_KERNEL);
98 void i915_lut_handle_free(struct i915_lut_handle *lut)
100 kmem_cache_free(slab_luts, lut);
103 static void lut_close(struct i915_gem_context *ctx)
105 struct radix_tree_iter iter;
108 mutex_lock(&ctx->lut_mutex);
110 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
111 struct i915_vma *vma = rcu_dereference_raw(*slot);
112 struct drm_i915_gem_object *obj = vma->obj;
113 struct i915_lut_handle *lut;
115 if (!kref_get_unless_zero(&obj->base.refcount))
118 spin_lock(&obj->lut_lock);
119 list_for_each_entry(lut, &obj->lut_list, obj_link) {
123 if (lut->handle != iter.index)
126 list_del(&lut->obj_link);
129 spin_unlock(&obj->lut_lock);
131 if (&lut->obj_link != &obj->lut_list) {
132 i915_lut_handle_free(lut);
133 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
135 i915_gem_object_put(obj);
138 i915_gem_object_put(obj);
141 mutex_unlock(&ctx->lut_mutex);
144 static struct intel_context *
145 lookup_user_engine(struct i915_gem_context *ctx,
147 const struct i915_engine_class_instance *ci)
148 #define LOOKUP_USER_INDEX BIT(0)
152 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
153 return ERR_PTR(-EINVAL);
155 if (!i915_gem_context_user_engines(ctx)) {
156 struct intel_engine_cs *engine;
158 engine = intel_engine_lookup_user(ctx->i915,
160 ci->engine_instance);
162 return ERR_PTR(-EINVAL);
164 idx = engine->legacy_idx;
166 idx = ci->engine_instance;
169 return i915_gem_context_get_engine(ctx, idx);
172 static int validate_priority(struct drm_i915_private *i915,
173 const struct drm_i915_gem_context_param *args)
175 s64 priority = args->value;
180 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
183 if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
184 priority < I915_CONTEXT_MIN_USER_PRIORITY)
187 if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
188 !capable(CAP_SYS_NICE))
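/*
 * Hedged userspace sketch (editor's addition) of exercising the checks
 * above via the uAPI; names are from include/uapi/drm/i915_drm.h and
 * error handling is omitted:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = -500,	// lowering priority needs no CAP_SYS_NICE
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * Values must lie in [I915_CONTEXT_MIN_USER_PRIORITY (-1023),
 * I915_CONTEXT_MAX_USER_PRIORITY (1023)], and raising above
 * I915_CONTEXT_DEFAULT_PRIORITY (0) requires CAP_SYS_NICE.
 */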
194 static void proto_context_close(struct drm_i915_private *i915,
195 struct i915_gem_proto_context *pc)
200 intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
203 if (pc->user_engines) {
204 for (i = 0; i < pc->num_user_engines; i++)
205 kfree(pc->user_engines[i].siblings);
206 kfree(pc->user_engines);
211 static int proto_context_set_persistence(struct drm_i915_private *i915,
212 struct i915_gem_proto_context *pc,
217 * Only contexts that are short-lived [that will expire or be
218 * reset] are allowed to survive past termination. We require
219 * hangcheck to ensure that the persistent requests are healthy.
221 if (!i915->params.enable_hangcheck)
224 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
226 /* To cancel a context we use "preempt-to-idle" */
227 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
231 * If the cancel fails, we then need to reset, cleanly!
233 * If the per-engine reset fails, all hope is lost! We resort
234 * to a full GPU reset in that unlikely case, but realistically
235 * if the engine could not reset, the full reset does not fare
236 * much better. The damage has been done.
238 * However, if we cannot reset an engine by itself, we cannot
239 * cleanup a hanging persistent context without causing
240 * collateral damage, and we should not pretend we can by
241 * exposing the interface.
243 if (!intel_has_reset_engine(to_gt(i915)))
246 pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
252 static int proto_context_set_protected(struct drm_i915_private *i915,
253 struct i915_gem_proto_context *pc,
259 pc->uses_protected_content = false;
260 } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
262 } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
263 !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
266 pc->uses_protected_content = true;
269 * protected context usage requires the PXP session to be up,
270 * which in turn requires the device to be active.
272 pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
274 if (!intel_pxp_is_active(&to_gt(i915)->pxp))
275 ret = intel_pxp_start(&to_gt(i915)->pxp);
281 static struct i915_gem_proto_context *
282 proto_context_create(struct drm_i915_private *i915, unsigned int flags)
284 struct i915_gem_proto_context *pc, *err;
286 pc = kzalloc(sizeof(*pc), GFP_KERNEL);
288 return ERR_PTR(-ENOMEM);
290 pc->num_user_engines = -1;
291 pc->user_engines = NULL;
292 pc->user_flags = BIT(UCONTEXT_BANNABLE) |
293 BIT(UCONTEXT_RECOVERABLE);
294 if (i915->params.enable_hangcheck)
295 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
296 pc->sched.priority = I915_PRIORITY_NORMAL;
298 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
299 if (!HAS_EXECLISTS(i915)) {
300 err = ERR_PTR(-EINVAL);
303 pc->single_timeline = true;
309 proto_context_close(i915, pc);
313 static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
314 struct i915_gem_proto_context *pc,
320 lockdep_assert_held(&fpriv->proto_context_lock);
322 ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
326 old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
327 if (xa_is_err(old)) {
328 xa_erase(&fpriv->context_xa, *id);
336 static int proto_context_register(struct drm_i915_file_private *fpriv,
337 struct i915_gem_proto_context *pc,
342 mutex_lock(&fpriv->proto_context_lock);
343 ret = proto_context_register_locked(fpriv, pc, id);
344 mutex_unlock(&fpriv->proto_context_lock);
349 static struct i915_address_space *
350 i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
352 struct i915_address_space *vm;
354 xa_lock(&file_priv->vm_xa);
355 vm = xa_load(&file_priv->vm_xa, id);
358 xa_unlock(&file_priv->vm_xa);
363 static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
364 struct i915_gem_proto_context *pc,
365 const struct drm_i915_gem_context_param *args)
367 struct drm_i915_private *i915 = fpriv->dev_priv;
368 struct i915_address_space *vm;
373 if (!HAS_FULL_PPGTT(i915))
376 if (upper_32_bits(args->value))
379 vm = i915_gem_vm_lookup(fpriv, args->value);
390 struct set_proto_ctx_engines {
391 struct drm_i915_private *i915;
392 unsigned num_engines;
393 struct i915_gem_proto_engine *engines;
397 set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
400 struct i915_context_engines_load_balance __user *ext =
401 container_of_user(base, typeof(*ext), base);
402 const struct set_proto_ctx_engines *set = data;
403 struct drm_i915_private *i915 = set->i915;
404 struct intel_engine_cs **siblings;
405 u16 num_siblings, idx;
409 if (!HAS_EXECLISTS(i915))
412 if (get_user(idx, &ext->engine_index))
415 if (idx >= set->num_engines) {
416 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
417 idx, set->num_engines);
421 idx = array_index_nospec(idx, set->num_engines);
422 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
424 "Invalid placement[%d], already occupied\n", idx);
428 if (get_user(num_siblings, &ext->num_siblings))
431 err = check_user_mbz(&ext->flags);
435 err = check_user_mbz(&ext->mbz64);
439 if (num_siblings == 0)
442 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
446 for (n = 0; n < num_siblings; n++) {
447 struct i915_engine_class_instance ci;
449 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
454 siblings[n] = intel_engine_lookup_user(i915,
459 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
460 n, ci.engine_class, ci.engine_instance);
466 if (num_siblings == 1) {
467 set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
468 set->engines[idx].engine = siblings[0];
471 set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
472 set->engines[idx].num_siblings = num_siblings;
473 set->engines[idx].siblings = siblings;
485 set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
487 struct i915_context_engines_bond __user *ext =
488 container_of_user(base, typeof(*ext), base);
489 const struct set_proto_ctx_engines *set = data;
490 struct drm_i915_private *i915 = set->i915;
491 struct i915_engine_class_instance ci;
492 struct intel_engine_cs *master;
496 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
497 !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
499 "Bonding not supported on this platform\n");
503 if (get_user(idx, &ext->virtual_index))
506 if (idx >= set->num_engines) {
508 "Invalid index for virtual engine: %d >= %d\n",
509 idx, set->num_engines);
513 idx = array_index_nospec(idx, set->num_engines);
514 if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
515 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
519 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
521 "Bonding with virtual engines not allowed\n");
525 err = check_user_mbz(&ext->flags);
529 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
530 err = check_user_mbz(&ext->mbz64[n]);
535 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
538 master = intel_engine_lookup_user(i915,
543 "Unrecognised master engine: { class:%u, instance:%u }\n",
544 ci.engine_class, ci.engine_instance);
548 if (intel_engine_uses_guc(master)) {
549 DRM_DEBUG("bonding extension not supported with GuC submission");
553 if (get_user(num_bonds, &ext->num_bonds))
556 for (n = 0; n < num_bonds; n++) {
557 struct intel_engine_cs *bond;
559 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
562 bond = intel_engine_lookup_user(i915,
567 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
568 n, ci.engine_class, ci.engine_instance);
577 set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
580 struct i915_context_engines_parallel_submit __user *ext =
581 container_of_user(base, typeof(*ext), base);
582 const struct set_proto_ctx_engines *set = data;
583 struct drm_i915_private *i915 = set->i915;
584 struct i915_engine_class_instance prev_engine;
586 int err = 0, n, i, j;
587 u16 slot, width, num_siblings;
588 struct intel_engine_cs **siblings = NULL;
589 intel_engine_mask_t prev_mask;
591 if (get_user(slot, &ext->engine_index))
594 if (get_user(width, &ext->width))
597 if (get_user(num_siblings, &ext->num_siblings))
600 if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
602 drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
607 if (slot >= set->num_engines) {
608 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
609 slot, set->num_engines);
613 if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
615 "Invalid placement[%d], already occupied\n", slot);
619 if (get_user(flags, &ext->flags))
623 drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
627 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
628 err = check_user_mbz(&ext->mbz64[n]);
634 drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
638 if (num_siblings < 1) {
639 drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
644 siblings = kmalloc_array(num_siblings * width,
650 /* Create contexts / engines */
651 for (i = 0; i < width; ++i) {
652 intel_engine_mask_t current_mask = 0;
654 for (j = 0; j < num_siblings; ++j) {
655 struct i915_engine_class_instance ci;
657 n = i * num_siblings + j;
658 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
664 intel_engine_lookup_user(i915, ci.engine_class,
668 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
669 n, ci.engine_class, ci.engine_instance);
675 * We don't support breadcrumb handshake on these
678 if (siblings[n]->class == RENDER_CLASS ||
679 siblings[n]->class == COMPUTE_CLASS) {
685 if (prev_engine.engine_class !=
688 "Mismatched class %d, %d\n",
689 prev_engine.engine_class,
697 current_mask |= siblings[n]->logical_mask;
701 if (current_mask != prev_mask << 1) {
703 "Non contiguous logical mask 0x%x, 0x%x\n",
704 prev_mask, current_mask);
709 prev_mask = current_mask;
712 set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
713 set->engines[slot].num_siblings = num_siblings;
714 set->engines[slot].width = width;
715 set->engines[slot].siblings = siblings;
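/*
 * Worked example (editor's addition): the prev_mask check above requires
 * each slot's combined logical mask to be the previous slot's shifted
 * left by one. E.g. width = 2 with one sibling per slot: logical_mask
 * 0x1 for slot 0 and 0x2 for slot 1 is accepted (0x2 == 0x1 << 1),
 * whereas 0x1 followed by 0x4 is rejected as non-contiguous.
 */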
725 static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
726 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
727 [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
728 [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
729 set_proto_ctx_engines_parallel_submit,
732 static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
733 struct i915_gem_proto_context *pc,
734 const struct drm_i915_gem_context_param *args)
736 struct drm_i915_private *i915 = fpriv->dev_priv;
737 struct set_proto_ctx_engines set = { .i915 = i915 };
738 struct i915_context_param_engines __user *user =
739 u64_to_user_ptr(args->value);
744 if (pc->num_user_engines >= 0) {
745 drm_dbg(&i915->drm, "Cannot set engines twice");
749 if (args->size < sizeof(*user) ||
750 !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
751 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
756 set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
757 /* RING_MASK has no shift so we can use it directly here */
758 if (set.num_engines > I915_EXEC_RING_MASK + 1)
761 set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
765 for (n = 0; n < set.num_engines; n++) {
766 struct i915_engine_class_instance ci;
767 struct intel_engine_cs *engine;
769 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
774 memset(&set.engines[n], 0, sizeof(set.engines[n]));
776 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
777 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
780 engine = intel_engine_lookup_user(i915,
785 "Invalid engine[%d]: { class:%d, instance:%d }\n",
786 n, ci.engine_class, ci.engine_instance);
791 set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
792 set.engines[n].engine = engine;
796 if (!get_user(extensions, &user->extensions))
797 err = i915_user_extensions(u64_to_user_ptr(extensions),
798 set_proto_ctx_engines_extensions,
799 ARRAY_SIZE(set_proto_ctx_engines_extensions),
806 pc->num_user_engines = set.num_engines;
807 pc->user_engines = set.engines;
812 static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
813 struct i915_gem_proto_context *pc,
814 struct drm_i915_gem_context_param *args)
816 struct drm_i915_private *i915 = fpriv->dev_priv;
817 struct drm_i915_gem_context_param_sseu user_sseu;
818 struct intel_sseu *sseu;
821 if (args->size < sizeof(user_sseu))
824 if (GRAPHICS_VER(i915) != 11)
827 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
834 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
837 if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
840 if (pc->num_user_engines >= 0) {
841 int idx = user_sseu.engine.engine_instance;
842 struct i915_gem_proto_engine *pe;
844 if (idx >= pc->num_user_engines)
847 pe = &pc->user_engines[idx];
849 /* Only the render engine supports RPCS configuration. */
850 if (pe->engine->class != RENDER_CLASS)
855 /* Only the render engine supports RPCS configuration. */
856 if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
859 /* There is only one render engine */
860 if (user_sseu.engine.engine_instance != 0)
863 sseu = &pc->legacy_rcs_sseu;
866 ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
870 args->size = sizeof(user_sseu);
875 static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
876 struct i915_gem_proto_context *pc,
877 struct drm_i915_gem_context_param *args)
881 switch (args->param) {
882 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
885 else if (args->value)
886 pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
888 pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
891 case I915_CONTEXT_PARAM_BANNABLE:
894 else if (!capable(CAP_SYS_ADMIN) && !args->value)
896 else if (args->value)
897 pc->user_flags |= BIT(UCONTEXT_BANNABLE);
898 else if (pc->uses_protected_content)
901 pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
904 case I915_CONTEXT_PARAM_RECOVERABLE:
907 else if (!args->value)
908 pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
909 else if (pc->uses_protected_content)
912 pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
915 case I915_CONTEXT_PARAM_PRIORITY:
916 ret = validate_priority(fpriv->dev_priv, args);
918 pc->sched.priority = args->value;
921 case I915_CONTEXT_PARAM_SSEU:
922 ret = set_proto_ctx_sseu(fpriv, pc, args);
925 case I915_CONTEXT_PARAM_VM:
926 ret = set_proto_ctx_vm(fpriv, pc, args);
929 case I915_CONTEXT_PARAM_ENGINES:
930 ret = set_proto_ctx_engines(fpriv, pc, args);
933 case I915_CONTEXT_PARAM_PERSISTENCE:
936 ret = proto_context_set_persistence(fpriv->dev_priv, pc,
940 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
941 ret = proto_context_set_protected(fpriv->dev_priv, pc,
945 case I915_CONTEXT_PARAM_NO_ZEROMAP:
946 case I915_CONTEXT_PARAM_BAN_PERIOD:
947 case I915_CONTEXT_PARAM_RINGSIZE:
956 static int intel_context_set_gem(struct intel_context *ce,
957 struct i915_gem_context *ctx,
958 struct intel_sseu sseu)
962 GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
963 RCU_INIT_POINTER(ce->gem_context, ctx);
965 GEM_BUG_ON(intel_context_is_pinned(ce));
966 ce->ring_size = SZ_16K;
969 ce->vm = i915_gem_context_get_eb_vm(ctx);
971 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
972 intel_engine_has_timeslices(ce->engine) &&
973 intel_engine_has_semaphores(ce->engine))
974 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
976 if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
977 ctx->i915->params.request_timeout_ms) {
978 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
980 intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
983 /* A valid SSEU has no zero fields */
984 if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
985 ret = intel_context_reconfigure_sseu(ce, sseu);
990 static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
993 struct intel_context *ce = e->engines[count], *child;
995 if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
998 for_each_child(ce, child)
999 intel_context_unpin(child);
1000 intel_context_unpin(ce);
1004 static void unpin_engines(struct i915_gem_engines *e)
1006 __unpin_engines(e, e->num_engines);
1009 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
1012 if (!e->engines[count])
1015 intel_context_put(e->engines[count]);
1020 static void free_engines(struct i915_gem_engines *e)
1022 __free_engines(e, e->num_engines);
1025 static void free_engines_rcu(struct rcu_head *rcu)
1027 struct i915_gem_engines *engines =
1028 container_of(rcu, struct i915_gem_engines, rcu);
1030 i915_sw_fence_fini(&engines->fence);
1031 free_engines(engines);
1035 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
1037 struct i915_gem_engines *engines =
1038 container_of(fence, typeof(*engines), fence);
1041 case FENCE_COMPLETE:
1042 if (!list_empty(&engines->link)) {
1043 struct i915_gem_context *ctx = engines->ctx;
1044 unsigned long flags;
1046 spin_lock_irqsave(&ctx->stale.lock, flags);
1047 list_del(&engines->link);
1048 spin_unlock_irqrestore(&ctx->stale.lock, flags);
1050 i915_gem_context_put(engines->ctx);
1054 init_rcu_head(&engines->rcu);
1055 call_rcu(&engines->rcu, free_engines_rcu);
1062 static struct i915_gem_engines *alloc_engines(unsigned int count)
1064 struct i915_gem_engines *e;
1066 e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
1070 i915_sw_fence_init(&e->fence, engines_notify);
1074 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
1075 struct intel_sseu rcs_sseu)
1077 const struct intel_gt *gt = to_gt(ctx->i915);
1078 struct intel_engine_cs *engine;
1079 struct i915_gem_engines *e, *err;
1080 enum intel_engine_id id;
1082 e = alloc_engines(I915_NUM_ENGINES);
1084 return ERR_PTR(-ENOMEM);
1086 for_each_engine(engine, gt, id) {
1087 struct intel_context *ce;
1088 struct intel_sseu sseu = {};
1091 if (engine->legacy_idx == INVALID_ENGINE)
1094 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
1095 GEM_BUG_ON(e->engines[engine->legacy_idx]);
1097 ce = intel_context_create(engine);
1103 e->engines[engine->legacy_idx] = ce;
1104 e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
1106 if (engine->class == RENDER_CLASS)
1109 ret = intel_context_set_gem(ce, ctx, sseu);
1124 static int perma_pin_contexts(struct intel_context *ce)
1126 struct intel_context *child;
1127 int i = 0, j = 0, ret;
1129 GEM_BUG_ON(!intel_context_is_parent(ce));
1131 ret = intel_context_pin(ce);
1135 for_each_child(ce, child) {
1136 ret = intel_context_pin(child);
1142 set_bit(CONTEXT_PERMA_PIN, &ce->flags);
1147 intel_context_unpin(ce);
1148 for_each_child(ce, child) {
1150 intel_context_unpin(child);
1158 static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
1159 unsigned int num_engines,
1160 struct i915_gem_proto_engine *pe)
1162 struct i915_gem_engines *e, *err;
1165 e = alloc_engines(num_engines);
1167 return ERR_PTR(-ENOMEM);
1168 e->num_engines = num_engines;
1170 for (n = 0; n < num_engines; n++) {
1171 struct intel_context *ce, *child;
1174 switch (pe[n].type) {
1175 case I915_GEM_ENGINE_TYPE_PHYSICAL:
1176 ce = intel_context_create(pe[n].engine);
1179 case I915_GEM_ENGINE_TYPE_BALANCED:
1180 ce = intel_engine_create_virtual(pe[n].siblings,
1181 pe[n].num_siblings, 0);
1184 case I915_GEM_ENGINE_TYPE_PARALLEL:
1185 ce = intel_engine_create_parallel(pe[n].siblings,
1190 case I915_GEM_ENGINE_TYPE_INVALID:
1192 GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
1203 ret = intel_context_set_gem(ce, ctx, pe->sseu);
1208 for_each_child(ce, child) {
1209 ret = intel_context_set_gem(child, ctx, pe->sseu);
1217 * XXX: Must be done after calling intel_context_set_gem as that
1218 * function changes the ring size. The ring is allocated when
1219 * the context is pinned. If the ring size is changed after
1220 * allocation, there is a ring size mismatch, which will cause
1221 * the context to hang. Presumably with a bit of reordering we
1222 * could move the perma-pin step to the backend function
1223 * intel_engine_create_parallel.
1225 if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
1226 ret = perma_pin_contexts(ce);
1241 static void i915_gem_context_release_work(struct work_struct *work)
1243 struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
1245 struct i915_address_space *vm;
1247 trace_i915_context_free(ctx);
1248 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1251 drm_syncobj_put(ctx->syncobj);
1257 if (ctx->pxp_wakeref)
1258 intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
1260 mutex_destroy(&ctx->engines_mutex);
1261 mutex_destroy(&ctx->lut_mutex);
1264 mutex_destroy(&ctx->mutex);
1266 kfree_rcu(ctx, rcu);
1269 void i915_gem_context_release(struct kref *ref)
1271 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
1273 queue_work(ctx->i915->wq, &ctx->release_work);
1276 static inline struct i915_gem_engines *
1277 __context_engines_static(const struct i915_gem_context *ctx)
1279 return rcu_dereference_protected(ctx->engines, true);
1282 static void __reset_context(struct i915_gem_context *ctx,
1283 struct intel_engine_cs *engine)
1285 intel_gt_handle_error(engine->gt, engine->mask, 0,
1286 "context closure in %s", ctx->name);
1289 static bool __cancel_engine(struct intel_engine_cs *engine)
1292 * Send a "high priority pulse" down the engine to cause the
1293 * current request to be momentarily preempted. (If it fails to
1294 * be preempted, it will be reset). As we have marked our context
1295 * as banned, any incomplete request, including any that are running, will
1296 * be skipped following the preemption.
1298 * If there is no hangchecking (one of the reasons why we try to
1299 * cancel the context) and no forced preemption, there may be no
1300 * means by which we reset the GPU and evict the persistent hog.
1301 * Ergo if we are unable to inject a preemptive pulse that can
1302 * kill the banned context, we fall back to doing a local reset
1305 return intel_engine_pulse(engine) == 0;
1308 static struct intel_engine_cs *active_engine(struct intel_context *ce)
1310 struct intel_engine_cs *engine = NULL;
1311 struct i915_request *rq;
1313 if (intel_context_has_inflight(ce))
1314 return intel_context_inflight(ce);
1320 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
1321 * to the request to prevent it being transferred to a new timeline
1322 * (and onto a new timeline->requests list).
1325 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1328 /* timeline is already completed up to this point? */
1329 if (!i915_request_get_rcu(rq))
1332 /* Check with the backend if the request is inflight */
1334 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
1335 found = i915_request_active_engine(rq, &engine);
1337 i915_request_put(rq);
1346 static void kill_engines(struct i915_gem_engines *engines, bool ban)
1348 struct i915_gem_engines_iter it;
1349 struct intel_context *ce;
1352 * Map the user's engine back to the actual engines; one virtual
1353 * engine will be mapped to multiple engines, and using ctx->engine[]
1354 * the same engine may have multiple instances in the user's map.
1355 * However, we only care about pending requests, so only include
1356 * engines on which there are incomplete requests.
1358 for_each_gem_engine(ce, engines, it) {
1359 struct intel_engine_cs *engine;
1361 if (ban && intel_context_ban(ce, NULL))
1365 * Check the current active state of this context; if we
1366 * are currently executing on the GPU we need to evict
1367 * ourselves. On the other hand, if we haven't yet been
1368 * submitted to the GPU or if everything is complete,
1369 * we have nothing to do.
1371 engine = active_engine(ce);
1373 /* First attempt to gracefully cancel the context */
1374 if (engine && !__cancel_engine(engine) && ban)
1376 * If we are unable to send a preemptive pulse to bump
1377 * the context from the GPU, we have to resort to a full
1378 * reset. We hope the collateral damage is worth it.
1380 __reset_context(engines->ctx, engine);
1384 static void kill_context(struct i915_gem_context *ctx)
1386 bool ban = (!i915_gem_context_is_persistent(ctx) ||
1387 !ctx->i915->params.enable_hangcheck);
1388 struct i915_gem_engines *pos, *next;
1390 spin_lock_irq(&ctx->stale.lock);
1391 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1392 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1393 if (!i915_sw_fence_await(&pos->fence)) {
1394 list_del_init(&pos->link);
1398 spin_unlock_irq(&ctx->stale.lock);
1400 kill_engines(pos, ban);
1402 spin_lock_irq(&ctx->stale.lock);
1403 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
1404 list_safe_reset_next(pos, next, link);
1405 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
1407 i915_sw_fence_complete(&pos->fence);
1409 spin_unlock_irq(&ctx->stale.lock);
1412 static void engines_idle_release(struct i915_gem_context *ctx,
1413 struct i915_gem_engines *engines)
1415 struct i915_gem_engines_iter it;
1416 struct intel_context *ce;
1418 INIT_LIST_HEAD(&engines->link);
1420 engines->ctx = i915_gem_context_get(ctx);
1422 for_each_gem_engine(ce, engines, it) {
1425 /* serialises with execbuf */
1426 set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
1427 if (!intel_context_pin_if_active(ce))
1430 /* Wait until context is finally scheduled out and retired */
1431 err = i915_sw_fence_await_active(&engines->fence,
1433 I915_ACTIVE_AWAIT_BARRIER);
1434 intel_context_unpin(ce);
1439 spin_lock_irq(&ctx->stale.lock);
1440 if (!i915_gem_context_is_closed(ctx))
1441 list_add_tail(&engines->link, &ctx->stale.engines);
1442 spin_unlock_irq(&ctx->stale.lock);
1445 if (list_empty(&engines->link)) /* raced, already closed */
1446 kill_engines(engines, true);
1448 i915_sw_fence_commit(&engines->fence);
1451 static void set_closed_name(struct i915_gem_context *ctx)
1455 /* Replace '[]' with '<>' to indicate closed in debug prints */
1457 s = strrchr(ctx->name, '[');
1463 s = strchr(s + 1, ']');
1468 static void context_close(struct i915_gem_context *ctx)
1470 struct i915_address_space *vm;
1472 /* Flush any concurrent set_engines() */
1473 mutex_lock(&ctx->engines_mutex);
1474 unpin_engines(__context_engines_static(ctx));
1475 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1476 i915_gem_context_set_closed(ctx);
1477 mutex_unlock(&ctx->engines_mutex);
1479 mutex_lock(&ctx->mutex);
1481 set_closed_name(ctx);
1485 /* i915_vm_close drops the final reference, which is a bit too
1486 * early and could result in surprises with concurrent
1487 * operations racing with this ctx close. Keep a full reference
1494 ctx->file_priv = ERR_PTR(-EBADF);
1497 * The LUT uses the VMA as a backpointer to unref the object,
1498 * so we need to clear the LUT before we close all the VMA (inside
1503 spin_lock(&ctx->i915->gem.contexts.lock);
1504 list_del(&ctx->link);
1505 spin_unlock(&ctx->i915->gem.contexts.lock);
1507 mutex_unlock(&ctx->mutex);
1510 * If the user has disabled hangchecking, we cannot be sure that
1511 * the batches will ever complete after the context is closed,
1512 * keeping the context and all resources pinned forever. So in this
1513 * case we opt to forcibly kill off all remaining requests on
1518 i915_gem_context_put(ctx);
1521 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1523 if (i915_gem_context_is_persistent(ctx) == state)
1528 * Only contexts that are short-lived [that will expire or be
1529 * reset] are allowed to survive past termination. We require
1530 * hangcheck to ensure that the persistent requests are healthy.
1532 if (!ctx->i915->params.enable_hangcheck)
1535 i915_gem_context_set_persistence(ctx);
1537 /* To cancel a context we use "preempt-to-idle" */
1538 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1542 * If the cancel fails, we then need to reset, cleanly!
1544 * If the per-engine reset fails, all hope is lost! We resort
1545 * to a full GPU reset in that unlikely case, but realistically
1546 * if the engine could not reset, the full reset does not fare
1547 * much better. The damage has been done.
1549 * However, if we cannot reset an engine by itself, we cannot
1550 * cleanup a hanging persistent context without causing
1551 * collateral damage, and we should not pretend we can by
1552 * exposing the interface.
1554 if (!intel_has_reset_engine(to_gt(ctx->i915)))
1557 i915_gem_context_clear_persistence(ctx);
1563 static struct i915_gem_context *
1564 i915_gem_create_context(struct drm_i915_private *i915,
1565 const struct i915_gem_proto_context *pc)
1567 struct i915_gem_context *ctx;
1568 struct i915_address_space *vm = NULL;
1569 struct i915_gem_engines *e;
1573 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1575 return ERR_PTR(-ENOMEM);
1577 kref_init(&ctx->ref);
1579 ctx->sched = pc->sched;
1580 mutex_init(&ctx->mutex);
1581 INIT_LIST_HEAD(&ctx->link);
1582 INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
1584 spin_lock_init(&ctx->stale.lock);
1585 INIT_LIST_HEAD(&ctx->stale.engines);
1588 vm = i915_vm_get(pc->vm);
1589 } else if (HAS_FULL_PPGTT(i915)) {
1590 struct i915_ppgtt *ppgtt;
1592 ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1593 if (IS_ERR(ppgtt)) {
1594 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
1596 err = PTR_ERR(ppgtt);
1602 ctx->vm = i915_vm_open(vm);
1604 /* i915_vm_open() takes a reference */
1608 mutex_init(&ctx->engines_mutex);
1609 if (pc->num_user_engines >= 0) {
1610 i915_gem_context_set_user_engines(ctx);
1611 e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
1613 i915_gem_context_clear_user_engines(ctx);
1614 e = default_engines(ctx, pc->legacy_rcs_sseu);
1620 RCU_INIT_POINTER(ctx->engines, e);
1622 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1623 mutex_init(&ctx->lut_mutex);
1625 /* NB: Mark all slices as needing a remap so that when the context first
1626 * loads it will restore whatever remap state already exists. If there
1627 * is no remap info, it will be a NOP. */
1628 ctx->remap_slice = ALL_L3_SLICES(i915);
1630 ctx->user_flags = pc->user_flags;
1632 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1633 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1635 if (pc->single_timeline) {
1636 err = drm_syncobj_create(&ctx->syncobj,
1637 DRM_SYNCOBJ_CREATE_SIGNALED,
1643 if (pc->uses_protected_content) {
1644 ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1645 ctx->uses_protected_content = true;
1648 trace_i915_context_create(ctx);
1656 i915_vm_close(ctx->vm);
1659 return ERR_PTR(err);
1662 static void init_contexts(struct i915_gem_contexts *gc)
1664 spin_lock_init(&gc->lock);
1665 INIT_LIST_HEAD(&gc->list);
1668 void i915_gem_init__contexts(struct drm_i915_private *i915)
1670 init_contexts(&i915->gem.contexts);
1673 static void gem_context_register(struct i915_gem_context *ctx,
1674 struct drm_i915_file_private *fpriv,
1677 struct drm_i915_private *i915 = ctx->i915;
1680 ctx->file_priv = fpriv;
1682 ctx->pid = get_task_pid(current, PIDTYPE_PID);
1683 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1684 current->comm, pid_nr(ctx->pid));
1686 /* And finally expose ourselves to userspace via the xarray */
1687 old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1690 spin_lock(&i915->gem.contexts.lock);
1691 list_add_tail(&ctx->link, &i915->gem.contexts.list);
1692 spin_unlock(&i915->gem.contexts.lock);
1695 int i915_gem_context_open(struct drm_i915_private *i915,
1696 struct drm_file *file)
1698 struct drm_i915_file_private *file_priv = file->driver_priv;
1699 struct i915_gem_proto_context *pc;
1700 struct i915_gem_context *ctx;
1703 mutex_init(&file_priv->proto_context_lock);
1704 xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);
1706 /* 0 reserved for the default context */
1707 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);
1709 /* 0 reserved for invalid/unassigned ppgtt */
1710 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
1712 pc = proto_context_create(i915, 0);
1718 ctx = i915_gem_create_context(i915, pc);
1719 proto_context_close(i915, pc);
1725 gem_context_register(ctx, file_priv, 0);
1730 xa_destroy(&file_priv->vm_xa);
1731 xa_destroy(&file_priv->context_xa);
1732 xa_destroy(&file_priv->proto_context_xa);
1733 mutex_destroy(&file_priv->proto_context_lock);
1737 void i915_gem_context_close(struct drm_file *file)
1739 struct drm_i915_file_private *file_priv = file->driver_priv;
1740 struct i915_gem_proto_context *pc;
1741 struct i915_address_space *vm;
1742 struct i915_gem_context *ctx;
1745 xa_for_each(&file_priv->proto_context_xa, idx, pc)
1746 proto_context_close(file_priv->dev_priv, pc);
1747 xa_destroy(&file_priv->proto_context_xa);
1748 mutex_destroy(&file_priv->proto_context_lock);
1750 xa_for_each(&file_priv->context_xa, idx, ctx)
1752 xa_destroy(&file_priv->context_xa);
1754 xa_for_each(&file_priv->vm_xa, idx, vm)
1756 xa_destroy(&file_priv->vm_xa);
1759 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1760 struct drm_file *file)
1762 struct drm_i915_private *i915 = to_i915(dev);
1763 struct drm_i915_gem_vm_control *args = data;
1764 struct drm_i915_file_private *file_priv = file->driver_priv;
1765 struct i915_ppgtt *ppgtt;
1769 if (!HAS_FULL_PPGTT(i915))
1775 ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1777 return PTR_ERR(ppgtt);
1779 if (args->extensions) {
1780 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1787 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1788 xa_limit_32b, GFP_KERNEL);
1792 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1797 i915_vm_put(&ppgtt->vm);
1801 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1802 struct drm_file *file)
1804 struct drm_i915_file_private *file_priv = file->driver_priv;
1805 struct drm_i915_gem_vm_control *args = data;
1806 struct i915_address_space *vm;
1811 if (args->extensions)
1814 vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1822 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1823 struct i915_gem_context *ctx,
1824 struct drm_i915_gem_context_param *args)
1826 struct i915_address_space *vm;
1830 if (!i915_gem_context_has_full_ppgtt(ctx))
1836 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1842 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1850 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1851 const struct drm_i915_gem_context_param_sseu *user,
1852 struct intel_sseu *context)
1854 const struct sseu_dev_info *device = &gt->info.sseu;
1855 struct drm_i915_private *i915 = gt->i915;
1857 /* No zeros in any field. */
1858 if (!user->slice_mask || !user->subslice_mask ||
1859 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1863 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1867 * Some future proofing on the types since the uAPI is wider than the
1868 * current internal implementation.
1870 if (overflows_type(user->slice_mask, context->slice_mask) ||
1871 overflows_type(user->subslice_mask, context->subslice_mask) ||
1872 overflows_type(user->min_eus_per_subslice,
1873 context->min_eus_per_subslice) ||
1874 overflows_type(user->max_eus_per_subslice,
1875 context->max_eus_per_subslice))
1878 /* Check validity against hardware. */
1879 if (user->slice_mask & ~device->slice_mask)
1882 if (user->subslice_mask & ~device->subslice_mask[0])
1885 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1888 context->slice_mask = user->slice_mask;
1889 context->subslice_mask = user->subslice_mask;
1890 context->min_eus_per_subslice = user->min_eus_per_subslice;
1891 context->max_eus_per_subslice = user->max_eus_per_subslice;
1893 /* Part specific restrictions. */
1894 if (GRAPHICS_VER(i915) == 11) {
1895 unsigned int hw_s = hweight8(device->slice_mask);
1896 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1897 unsigned int req_s = hweight8(context->slice_mask);
1898 unsigned int req_ss = hweight8(context->subslice_mask);
1901 * Only full subslice enablement is possible if more than one
1902 * slice is turned on.
1904 if (req_s > 1 && req_ss != hw_ss_per_s)
1908 * If more than four (SScount bitfield limit) subslices are
1909 * requested then the number has to be even.
1911 if (req_ss > 4 && (req_ss & 1))
1915 * If only one slice is enabled and subslice count is below the
1916 * device full enablement, it must be at most half of all the
1917 * available subslices.
1919 if (req_s == 1 && req_ss < hw_ss_per_s &&
1920 req_ss > (hw_ss_per_s / 2))
1923 /* ABI restriction - VME use case only. */
1925 /* All slices or one slice only. */
1926 if (req_s != 1 && req_s != hw_s)
1930 * Half subslices or full enablement only when one slice is
1934 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1937 /* No EU configuration changes. */
1938 if ((user->min_eus_per_subslice !=
1939 device->max_eus_per_subslice) ||
1940 (user->max_eus_per_subslice !=
1941 device->max_eus_per_subslice))
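/*
 * Worked example (editor's addition) of the GRAPHICS_VER == 11 rules
 * above, assuming a part with hw_s = 1 slice and hw_ss_per_s = 8
 * subslices: with req_s = 1, only req_ss = 8 (full enablement) or
 * req_ss = 4 (exactly half) survive every check; req_ss = 6 is even but
 * fails the "at most half when below full" rule, and req_ss = 3 fails
 * the half-or-full VME restriction.
 */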
1948 static int set_sseu(struct i915_gem_context *ctx,
1949 struct drm_i915_gem_context_param *args)
1951 struct drm_i915_private *i915 = ctx->i915;
1952 struct drm_i915_gem_context_param_sseu user_sseu;
1953 struct intel_context *ce;
1954 struct intel_sseu sseu;
1955 unsigned long lookup;
1958 if (args->size < sizeof(user_sseu))
1961 if (GRAPHICS_VER(i915) != 11)
1964 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1971 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1975 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1976 lookup |= LOOKUP_USER_INDEX;
1978 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1982 /* Only the render engine supports RPCS configuration. */
1983 if (ce->engine->class != RENDER_CLASS) {
1988 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
1992 ret = intel_context_reconfigure_sseu(ce, sseu);
1996 args->size = sizeof(user_sseu);
1999 intel_context_put(ce);
2004 set_persistence(struct i915_gem_context *ctx,
2005 const struct drm_i915_gem_context_param *args)
2010 return __context_set_persistence(ctx, args->value);
2013 static int set_priority(struct i915_gem_context *ctx,
2014 const struct drm_i915_gem_context_param *args)
2016 struct i915_gem_engines_iter it;
2017 struct intel_context *ce;
2020 err = validate_priority(ctx->i915, args);
2024 ctx->sched.priority = args->value;
2026 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2027 if (!intel_engine_has_timeslices(ce->engine))
2030 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
2031 intel_engine_has_semaphores(ce->engine))
2032 intel_context_set_use_semaphores(ce);
2034 intel_context_clear_use_semaphores(ce);
2036 i915_gem_context_unlock_engines(ctx);
2041 static int get_protected(struct i915_gem_context *ctx,
2042 struct drm_i915_gem_context_param *args)
2045 args->value = i915_gem_context_uses_protected_content(ctx);
2050 static int ctx_setparam(struct drm_i915_file_private *fpriv,
2051 struct i915_gem_context *ctx,
2052 struct drm_i915_gem_context_param *args)
2056 switch (args->param) {
2057 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2060 else if (args->value)
2061 i915_gem_context_set_no_error_capture(ctx);
2063 i915_gem_context_clear_no_error_capture(ctx);
2066 case I915_CONTEXT_PARAM_BANNABLE:
2069 else if (!capable(CAP_SYS_ADMIN) && !args->value)
2071 else if (args->value)
2072 i915_gem_context_set_bannable(ctx);
2073 else if (i915_gem_context_uses_protected_content(ctx))
2074 ret = -EPERM; /* can't clear this for protected contexts */
2076 i915_gem_context_clear_bannable(ctx);
2079 case I915_CONTEXT_PARAM_RECOVERABLE:
2082 else if (!args->value)
2083 i915_gem_context_clear_recoverable(ctx);
2084 else if (i915_gem_context_uses_protected_content(ctx))
2085 ret = -EPERM; /* can't set this for protected contexts */
2087 i915_gem_context_set_recoverable(ctx);
2090 case I915_CONTEXT_PARAM_PRIORITY:
2091 ret = set_priority(ctx, args);
2094 case I915_CONTEXT_PARAM_SSEU:
2095 ret = set_sseu(ctx, args);
2098 case I915_CONTEXT_PARAM_PERSISTENCE:
2099 ret = set_persistence(ctx, args);
2102 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2103 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2104 case I915_CONTEXT_PARAM_BAN_PERIOD:
2105 case I915_CONTEXT_PARAM_RINGSIZE:
2106 case I915_CONTEXT_PARAM_VM:
2107 case I915_CONTEXT_PARAM_ENGINES:
2117 struct i915_gem_proto_context *pc;
2118 struct drm_i915_file_private *fpriv;
2121 static int create_setparam(struct i915_user_extension __user *ext, void *data)
2123 struct drm_i915_gem_context_create_ext_setparam local;
2124 const struct create_ext *arg = data;
2126 if (copy_from_user(&local, ext, sizeof(local)))
2129 if (local.param.ctx_id)
2132 return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
2135 static int invalid_ext(struct i915_user_extension __user *ext, void *data)
2140 static const i915_user_extension_fn create_extensions[] = {
2141 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2142 [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
2145 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2147 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2150 static inline struct i915_gem_context *
2151 __context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2153 struct i915_gem_context *ctx;
2156 ctx = xa_load(&file_priv->context_xa, id);
2157 if (ctx && !kref_get_unless_zero(&ctx->ref))
2164 static struct i915_gem_context *
2165 finalize_create_context_locked(struct drm_i915_file_private *file_priv,
2166 struct i915_gem_proto_context *pc, u32 id)
2168 struct i915_gem_context *ctx;
2171 lockdep_assert_held(&file_priv->proto_context_lock);
2173 ctx = i915_gem_create_context(file_priv->dev_priv, pc);
2177 gem_context_register(ctx, file_priv, id);
2179 old = xa_erase(&file_priv->proto_context_xa, id);
2180 GEM_BUG_ON(old != pc);
2181 proto_context_close(file_priv->dev_priv, pc);
2183 /* One for the xarray and one for the caller */
2184 return i915_gem_context_get(ctx);
2187 struct i915_gem_context *
2188 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2190 struct i915_gem_proto_context *pc;
2191 struct i915_gem_context *ctx;
2193 ctx = __context_lookup(file_priv, id);
2197 mutex_lock(&file_priv->proto_context_lock);
2198 /* Try one more time under the lock */
2199 ctx = __context_lookup(file_priv, id);
2201 pc = xa_load(&file_priv->proto_context_xa, id);
2203 ctx = ERR_PTR(-ENOENT);
2205 ctx = finalize_create_context_locked(file_priv, pc, id);
2207 mutex_unlock(&file_priv->proto_context_lock);
2212 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2213 struct drm_file *file)
2215 struct drm_i915_private *i915 = to_i915(dev);
2216 struct drm_i915_gem_context_create_ext *args = data;
2217 struct create_ext ext_data;
2221 if (!DRIVER_CAPS(i915)->has_logical_contexts)
2224 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2227 ret = intel_gt_terminally_wedged(to_gt(i915));
2231 ext_data.fpriv = file->driver_priv;
2232 if (client_is_banned(ext_data.fpriv)) {
2234 "client %s[%d] banned from creating ctx\n",
2235 current->comm, task_pid_nr(current));
2239 ext_data.pc = proto_context_create(i915, args->flags);
2240 if (IS_ERR(ext_data.pc))
2241 return PTR_ERR(ext_data.pc);
2243 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2244 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2246 ARRAY_SIZE(create_extensions),
2252 if (GRAPHICS_VER(i915) > 12) {
2253 struct i915_gem_context *ctx;
2255 /* Get ourselves a context ID */
2256 ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2257 xa_limit_32b, GFP_KERNEL);
2261 ctx = i915_gem_create_context(i915, ext_data.pc);
2267 proto_context_close(i915, ext_data.pc);
2268 gem_context_register(ctx, ext_data.fpriv, id);
2270 ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
2276 drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
2281 proto_context_close(i915, ext_data.pc);
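/*
 * Hedged userspace sketch (editor's addition) of driving this ioctl; the
 * struct and ioctl names are from the i915 uAPI, and "setparam" stands
 * for a caller-prepared drm_i915_gem_context_create_ext_setparam chain:
 *
 *	struct drm_i915_gem_context_create_ext arg = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (__u64)(uintptr_t)&setparam.base,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &arg) == 0)
 *		ctx_id = arg.ctx_id;	// new context handle
 */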
2285 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2286 struct drm_file *file)
2288 struct drm_i915_gem_context_destroy *args = data;
2289 struct drm_i915_file_private *file_priv = file->driver_priv;
2290 struct i915_gem_proto_context *pc;
2291 struct i915_gem_context *ctx;
2299 /* We need to hold the proto-context lock here to prevent races
2300 * with finalize_create_context_locked().
2302 mutex_lock(&file_priv->proto_context_lock);
2303 ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2304 pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2305 mutex_unlock(&file_priv->proto_context_lock);
2309 GEM_WARN_ON(ctx && pc);
2312 proto_context_close(file_priv->dev_priv, pc);
2320 static int get_sseu(struct i915_gem_context *ctx,
2321 struct drm_i915_gem_context_param *args)
2323 struct drm_i915_gem_context_param_sseu user_sseu;
2324 struct intel_context *ce;
2325 unsigned long lookup;
2328 if (args->size == 0)
2330 else if (args->size < sizeof(user_sseu))
2333 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2340 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2344 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2345 lookup |= LOOKUP_USER_INDEX;
2347 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2351 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2353 intel_context_put(ce);
2357 user_sseu.slice_mask = ce->sseu.slice_mask;
2358 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2359 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2360 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2362 intel_context_unlock_pinned(ce);
2363 intel_context_put(ce);
2365 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2370 args->size = sizeof(user_sseu);
2375 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2376 struct drm_file *file)
2378 struct drm_i915_file_private *file_priv = file->driver_priv;
2379 struct drm_i915_gem_context_param *args = data;
2380 struct i915_gem_context *ctx;
2381 struct i915_address_space *vm;
2384 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2386 return PTR_ERR(ctx);
2388 switch (args->param) {
2389 case I915_CONTEXT_PARAM_GTT_SIZE:
2391 vm = i915_gem_context_get_eb_vm(ctx);
2392 args->value = vm->total;
2397 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2399 args->value = i915_gem_context_no_error_capture(ctx);
2402 case I915_CONTEXT_PARAM_BANNABLE:
2404 args->value = i915_gem_context_is_bannable(ctx);
2407 case I915_CONTEXT_PARAM_RECOVERABLE:
2409 args->value = i915_gem_context_is_recoverable(ctx);
2412 case I915_CONTEXT_PARAM_PRIORITY:
2414 args->value = ctx->sched.priority;
2417 case I915_CONTEXT_PARAM_SSEU:
2418 ret = get_sseu(ctx, args);
2421 case I915_CONTEXT_PARAM_VM:
2422 ret = get_ppgtt(file_priv, ctx, args);
2425 case I915_CONTEXT_PARAM_PERSISTENCE:
2427 args->value = i915_gem_context_is_persistent(ctx);
2430 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2431 ret = get_protected(ctx, args);
2434 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2435 case I915_CONTEXT_PARAM_BAN_PERIOD:
2436 case I915_CONTEXT_PARAM_ENGINES:
2437 case I915_CONTEXT_PARAM_RINGSIZE:
2443 i915_gem_context_put(ctx);
2447 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2448 struct drm_file *file)
2450 struct drm_i915_file_private *file_priv = file->driver_priv;
2451 struct drm_i915_gem_context_param *args = data;
2452 struct i915_gem_proto_context *pc;
2453 struct i915_gem_context *ctx;
2456 mutex_lock(&file_priv->proto_context_lock);
2457 ctx = __context_lookup(file_priv, args->ctx_id);
2459 pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2461 /* Contexts should be finalized inside
2462 * GEM_CONTEXT_CREATE starting with graphics version 13.
2465 WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
2466 ret = set_proto_ctx_param(file_priv, pc, args);
2471 mutex_unlock(&file_priv->proto_context_lock);
2474 ret = ctx_setparam(file_priv, ctx, args);
2475 i915_gem_context_put(ctx);
2481 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2482 void *data, struct drm_file *file)
2484 struct drm_i915_private *i915 = to_i915(dev);
2485 struct drm_i915_reset_stats *args = data;
2486 struct i915_gem_context *ctx;
2488 if (args->flags || args->pad)
2491 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2493 return PTR_ERR(ctx);
2496 * We opt for unserialised reads here. This may result in tearing
2497 * in the extremely unlikely event of a GPU hang on this context
2498 * as we are querying them. If we need that extra layer of protection,
2499 * we should wrap the hangstats with a seqlock.
2502 if (capable(CAP_SYS_ADMIN))
2503 args->reset_count = i915_reset_count(&i915->gpu_error);
2505 args->reset_count = 0;
2507 args->batch_active = atomic_read(&ctx->guilty_count);
2508 args->batch_pending = atomic_read(&ctx->active_count);
2510 i915_gem_context_put(ctx);
2514 /* GEM context-engines iterator: for_each_gem_engine() */
2515 struct intel_context *
2516 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2518 const struct i915_gem_engines *e = it->engines;
2519 struct intel_context *ctx;
2525 if (it->idx >= e->num_engines)
2528 ctx = e->engines[it->idx++];
2534 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2535 #include "selftests/mock_context.c"
2536 #include "selftests/i915_gem_context.c"
2539 void i915_gem_context_module_exit(void)
2541 kmem_cache_destroy(slab_luts);
2544 int __init i915_gem_context_module_init(void)
2546 slab_luts = KMEM_CACHE(i915_lut_handle, 0);