/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits RC6 (the GPU has its own internal power context, except on
 * gen5). Though something like a context does exist for the media ring, the
 * code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some context to load
 * as current, so that a save is invoked for the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                          refcount   pincount   active
 * S0: initial state                            0          0         0
 * S1: context created                          1          0         0
 * S2: context is currently running             2          1         X
 * S3: GPU referenced, but not current          2          0         1
 * S4: context is current, but destroyed        1          1         0
 * S5: like S3, but destroyed                   1          0         1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context while it is still active.
 */
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
70 #include <drm/i915_drm.h>
72 #include "gt/intel_lrc_reg.h"
73 #include "gt/intel_engine_user.h"
75 #include "i915_gem_context.h"
76 #include "i915_globals.h"
77 #include "i915_trace.h"
78 #include "i915_user_extensions.h"
#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
82 static struct i915_global_gem_context {
83 struct i915_global base;
84 struct kmem_cache *slab_luts;
87 struct i915_lut_handle *i915_lut_handle_alloc(void)
89 return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
92 void i915_lut_handle_free(struct i915_lut_handle *lut)
94 return kmem_cache_free(global.slab_luts, lut);
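/*
 * Detach every handle->vma lookup entry belonging to this context: remove the
 * matching lut from each object's lut_list, clear the radix tree slot, drop
 * the vma open count (closing a non-GGTT vma on its last close) and release
 * the object references the LUT held.
 */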
97 static void lut_close(struct i915_gem_context *ctx)
99 struct radix_tree_iter iter;
102 lockdep_assert_held(&ctx->mutex);
105 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
106 struct i915_vma *vma = rcu_dereference_raw(*slot);
107 struct drm_i915_gem_object *obj = vma->obj;
108 struct i915_lut_handle *lut;
110 if (!kref_get_unless_zero(&obj->base.refcount))
114 i915_gem_object_lock(obj);
115 list_for_each_entry(lut, &obj->lut_list, obj_link) {
119 if (lut->handle != iter.index)
122 list_del(&lut->obj_link);
125 i915_gem_object_unlock(obj);
128 if (&lut->obj_link != &obj->lut_list) {
129 i915_lut_handle_free(lut);
130 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
131 if (atomic_dec_and_test(&vma->open_count) &&
132 !i915_vma_is_ggtt(vma))
134 i915_gem_object_put(obj);
137 i915_gem_object_put(obj);
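/*
 * Resolve a user-supplied engine reference: if the context carries a
 * user-defined engine map, the reference is an index into that map
 * (LOOKUP_USER_INDEX); otherwise it is a { class, instance } pair looked up
 * in the legacy engine map.
 */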
142 static struct intel_context *
143 lookup_user_engine(struct i915_gem_context *ctx,
145 const struct i915_engine_class_instance *ci)
146 #define LOOKUP_USER_INDEX BIT(0)
150 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
151 return ERR_PTR(-EINVAL);
153 if (!i915_gem_context_user_engines(ctx)) {
154 struct intel_engine_cs *engine;
156 engine = intel_engine_lookup_user(ctx->i915,
158 ci->engine_instance);
160 return ERR_PTR(-EINVAL);
162 idx = engine->legacy_idx;
164 idx = ci->engine_instance;
167 return i915_gem_context_get_engine(ctx, idx);
170 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
174 lockdep_assert_held(&i915->contexts.mutex);
176 if (INTEL_GEN(i915) >= 12)
177 max = GEN12_MAX_CONTEXT_HW_ID;
178 else if (INTEL_GEN(i915) >= 11)
179 max = GEN11_MAX_CONTEXT_HW_ID;
180 else if (USES_GUC_SUBMISSION(i915))
182 * When using GuC in proxy submission, GuC consumes the
183 * highest bit in the context id to indicate proxy submission.
185 max = MAX_GUC_CONTEXT_HW_ID;
187 max = MAX_CONTEXT_HW_ID;
189 return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
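/*
 * No hw_id could be allocated, so reclaim the id of the first idle (unpinned)
 * context on the list and hand it to the caller; pinned contexts are rotated
 * to the tail so the scan stays in "least recently scanned" order.
 */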
192 static int steal_hw_id(struct drm_i915_private *i915)
194 struct i915_gem_context *ctx, *cn;
198 lockdep_assert_held(&i915->contexts.mutex);
200 list_for_each_entry_safe(ctx, cn,
201 &i915->contexts.hw_id_list, hw_id_link) {
202 if (atomic_read(&ctx->hw_id_pin_count)) {
203 list_move_tail(&ctx->hw_id_link, &pinned);
207 GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
208 list_del_init(&ctx->hw_id_link);
	 * Remember how far we got up on the last repossession scan, so the
	 * list is kept in a "least recently scanned" order.
217 list_splice_tail(&pinned, &i915->contexts.hw_id_list);
221 static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
225 lockdep_assert_held(&i915->contexts.mutex);
228 * We prefer to steal/stall ourselves and our users over that of the
229 * entire system. That may be a little unfair to our users, and
230 * even hurt high priority clients. The choice is whether to oomkill
231 * something else, or steal a context id.
233 ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
234 if (unlikely(ret < 0)) {
235 ret = steal_hw_id(i915);
236 if (ret < 0) /* once again for the correct errno code */
237 ret = new_hw_id(i915, GFP_KERNEL);
246 static void release_hw_id(struct i915_gem_context *ctx)
248 struct drm_i915_private *i915 = ctx->i915;
250 if (list_empty(&ctx->hw_id_link))
253 mutex_lock(&i915->contexts.mutex);
254 if (!list_empty(&ctx->hw_id_link)) {
255 ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
256 list_del_init(&ctx->hw_id_link);
258 mutex_unlock(&i915->contexts.mutex);
261 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
264 if (!e->engines[count])
267 intel_context_put(e->engines[count]);
272 static void free_engines(struct i915_gem_engines *e)
274 __free_engines(e, e->num_engines);
277 static void free_engines_rcu(struct rcu_head *rcu)
279 free_engines(container_of(rcu, struct i915_gem_engines, rcu));
282 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
284 const struct intel_gt *gt = &ctx->i915->gt;
285 struct intel_engine_cs *engine;
286 struct i915_gem_engines *e;
287 enum intel_engine_id id;
289 e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
291 return ERR_PTR(-ENOMEM);
293 init_rcu_head(&e->rcu);
294 for_each_engine(engine, gt, id) {
295 struct intel_context *ce;
297 ce = intel_context_create(ctx, engine);
299 __free_engines(e, id);
304 e->num_engines = id + 1;
310 static void i915_gem_context_free(struct i915_gem_context *ctx)
312 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
313 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
317 i915_vm_put(ctx->vm);
319 free_engines(rcu_access_pointer(ctx->engines));
320 mutex_destroy(&ctx->engines_mutex);
322 kfree(ctx->jump_whitelist);
325 intel_timeline_put(ctx->timeline);
330 list_del(&ctx->link);
331 mutex_destroy(&ctx->mutex);
336 static void contexts_free(struct drm_i915_private *i915)
338 struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
339 struct i915_gem_context *ctx, *cn;
341 lockdep_assert_held(&i915->drm.struct_mutex);
343 llist_for_each_entry_safe(ctx, cn, freed, free_link)
344 i915_gem_context_free(ctx);
347 static void contexts_free_first(struct drm_i915_private *i915)
349 struct i915_gem_context *ctx;
350 struct llist_node *freed;
352 lockdep_assert_held(&i915->drm.struct_mutex);
354 freed = llist_del_first(&i915->contexts.free_list);
358 ctx = container_of(freed, typeof(*ctx), free_link);
359 i915_gem_context_free(ctx);
362 static void contexts_free_worker(struct work_struct *work)
364 struct drm_i915_private *i915 =
365 container_of(work, typeof(*i915), contexts.free_work);
367 mutex_lock(&i915->drm.struct_mutex);
369 mutex_unlock(&i915->drm.struct_mutex);
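/*
 * The final reference may be dropped from a path that cannot take
 * struct_mutex, so the context is queued onto a free list and actually
 * released from the worker above, which can.
 */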
372 void i915_gem_context_release(struct kref *ref)
374 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
375 struct drm_i915_private *i915 = ctx->i915;
377 trace_i915_context_free(ctx);
378 if (llist_add(&ctx->free_link, &i915->contexts.free_list))
379 queue_work(i915->wq, &i915->contexts.free_work);
382 static void context_close(struct i915_gem_context *ctx)
384 mutex_lock(&ctx->mutex);
386 i915_gem_context_set_closed(ctx);
387 ctx->file_priv = ERR_PTR(-EBADF);
	 * This context will never again be assigned to HW, so we can
	 * reuse its ID for the next context.
396 * The LUT uses the VMA as a backpointer to unref the object,
397 * so we need to clear the LUT before we close all the VMA (inside
402 mutex_unlock(&ctx->mutex);
403 i915_gem_context_put(ctx);
406 static struct i915_gem_context *
407 __create_context(struct drm_i915_private *i915)
409 struct i915_gem_context *ctx;
410 struct i915_gem_engines *e;
414 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
416 return ERR_PTR(-ENOMEM);
418 kref_init(&ctx->ref);
419 list_add_tail(&ctx->link, &i915->contexts.list);
421 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
422 mutex_init(&ctx->mutex);
424 mutex_init(&ctx->engines_mutex);
425 e = default_engines(ctx);
430 RCU_INIT_POINTER(ctx->engines, e);
432 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
433 INIT_LIST_HEAD(&ctx->hw_id_link);
435 /* NB: Mark all slices as needing a remap so that when the context first
436 * loads it will restore whatever remap state already exists. If there
437 * is no remap info, it will be a NOP. */
438 ctx->remap_slice = ALL_L3_SLICES(i915);
440 i915_gem_context_set_bannable(ctx);
441 i915_gem_context_set_recoverable(ctx);
443 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
444 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
446 ctx->jump_whitelist = NULL;
447 ctx->jump_whitelist_cmds = 0;
457 context_apply_all(struct i915_gem_context *ctx,
458 void (*fn)(struct intel_context *ce, void *data),
461 struct i915_gem_engines_iter it;
462 struct intel_context *ce;
464 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
466 i915_gem_context_unlock_engines(ctx);
469 static void __apply_ppgtt(struct intel_context *ce, void *vm)
472 ce->vm = i915_vm_get(vm);
475 static struct i915_address_space *
476 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
478 struct i915_address_space *old = ctx->vm;
480 GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
482 ctx->vm = i915_vm_get(vm);
483 context_apply_all(ctx, __apply_ppgtt, vm);
488 static void __assign_ppgtt(struct i915_gem_context *ctx,
489 struct i915_address_space *vm)
494 vm = __set_ppgtt(ctx, vm);
499 static void __set_timeline(struct intel_timeline **dst,
500 struct intel_timeline *src)
502 struct intel_timeline *old = *dst;
504 *dst = src ? intel_timeline_get(src) : NULL;
507 intel_timeline_put(old);
510 static void __apply_timeline(struct intel_context *ce, void *timeline)
512 __set_timeline(&ce->timeline, timeline);
515 static void __assign_timeline(struct i915_gem_context *ctx,
516 struct intel_timeline *timeline)
518 __set_timeline(&ctx->timeline, timeline);
519 context_apply_all(ctx, __apply_timeline, timeline);
522 static struct i915_gem_context *
523 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
525 struct i915_gem_context *ctx;
527 lockdep_assert_held(&dev_priv->drm.struct_mutex);
529 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
530 !HAS_EXECLISTS(dev_priv))
531 return ERR_PTR(-EINVAL);
533 /* Reap the most stale context */
534 contexts_free_first(dev_priv);
536 ctx = __create_context(dev_priv);
540 if (HAS_FULL_PPGTT(dev_priv)) {
541 struct i915_ppgtt *ppgtt;
543 ppgtt = i915_ppgtt_create(dev_priv);
545 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
548 return ERR_CAST(ppgtt);
551 __assign_ppgtt(ctx, &ppgtt->vm);
552 i915_vm_put(&ppgtt->vm);
555 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
556 struct intel_timeline *timeline;
558 timeline = intel_timeline_create(&dev_priv->gt, NULL);
559 if (IS_ERR(timeline)) {
561 return ERR_CAST(timeline);
564 __assign_timeline(ctx, timeline);
565 intel_timeline_put(timeline);
568 trace_i915_context_create(ctx);
574 destroy_kernel_context(struct i915_gem_context **ctxp)
576 struct i915_gem_context *ctx;
578 /* Keep the context ref so that we can free it immediately ourselves */
579 ctx = i915_gem_context_get(fetch_and_zero(ctxp));
580 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
583 i915_gem_context_free(ctx);
586 struct i915_gem_context *
587 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
589 struct i915_gem_context *ctx;
592 ctx = i915_gem_create_context(i915, 0);
596 err = i915_gem_context_pin_hw_id(ctx);
598 destroy_kernel_context(&ctx);
602 i915_gem_context_clear_bannable(ctx);
603 ctx->sched.priority = I915_USER_PRIORITY(prio);
605 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
610 static void init_contexts(struct drm_i915_private *i915)
612 mutex_init(&i915->contexts.mutex);
613 INIT_LIST_HEAD(&i915->contexts.list);
615 /* Using the simple ida interface, the max is limited by sizeof(int) */
616 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
617 BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
618 ida_init(&i915->contexts.hw_ida);
619 INIT_LIST_HEAD(&i915->contexts.hw_id_list);
621 INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
622 init_llist_head(&i915->contexts.free_list);
625 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
627 struct i915_gem_context *ctx;
629 /* Reassure ourselves we are only called once */
630 GEM_BUG_ON(dev_priv->kernel_context);
632 init_contexts(dev_priv);
634 /* lowest priority; idle task */
635 ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
637 DRM_ERROR("Failed to create default global context\n");
	 * For easy recognisability, we want the kernel context to be 0 and then
642 * all user contexts will have non-zero hw_id. Kernel contexts are
643 * permanently pinned, so that we never suffer a stall and can
644 * use them from any allocation context (e.g. for evicting other
645 * contexts and from inside the shrinker).
647 GEM_BUG_ON(ctx->hw_id);
648 GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
649 dev_priv->kernel_context = ctx;
651 DRM_DEBUG_DRIVER("%s context support initialized\n",
652 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
657 void i915_gem_contexts_fini(struct drm_i915_private *i915)
659 lockdep_assert_held(&i915->drm.struct_mutex);
661 destroy_kernel_context(&i915->kernel_context);
663 /* Must free all deferred contexts (via flush_workqueue) first */
664 GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
665 ida_destroy(&i915->contexts.hw_ida);
668 static int context_idr_cleanup(int id, void *p, void *data)
674 static int vm_idr_cleanup(int id, void *p, void *data)
680 static int gem_context_register(struct i915_gem_context *ctx,
681 struct drm_i915_file_private *fpriv)
685 ctx->file_priv = fpriv;
687 ctx->vm->file = fpriv;
689 ctx->pid = get_task_pid(current, PIDTYPE_PID);
690 ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
691 current->comm, pid_nr(ctx->pid));
697 /* And finally expose ourselves to userspace via the idr */
698 mutex_lock(&fpriv->context_idr_lock);
699 ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
700 mutex_unlock(&fpriv->context_idr_lock);
704 kfree(fetch_and_zero(&ctx->name));
706 put_pid(fetch_and_zero(&ctx->pid));
711 int i915_gem_context_open(struct drm_i915_private *i915,
712 struct drm_file *file)
714 struct drm_i915_file_private *file_priv = file->driver_priv;
715 struct i915_gem_context *ctx;
718 mutex_init(&file_priv->context_idr_lock);
719 mutex_init(&file_priv->vm_idr_lock);
721 idr_init(&file_priv->context_idr);
722 idr_init_base(&file_priv->vm_idr, 1);
724 mutex_lock(&i915->drm.struct_mutex);
725 ctx = i915_gem_create_context(i915, 0);
726 mutex_unlock(&i915->drm.struct_mutex);
732 err = gem_context_register(ctx, file_priv);
736 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
744 idr_destroy(&file_priv->vm_idr);
745 idr_destroy(&file_priv->context_idr);
746 mutex_destroy(&file_priv->vm_idr_lock);
747 mutex_destroy(&file_priv->context_idr_lock);
751 void i915_gem_context_close(struct drm_file *file)
753 struct drm_i915_file_private *file_priv = file->driver_priv;
755 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
756 idr_destroy(&file_priv->context_idr);
757 mutex_destroy(&file_priv->context_idr_lock);
759 idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
760 idr_destroy(&file_priv->vm_idr);
761 mutex_destroy(&file_priv->vm_idr_lock);
764 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
765 struct drm_file *file)
767 struct drm_i915_private *i915 = to_i915(dev);
768 struct drm_i915_gem_vm_control *args = data;
769 struct drm_i915_file_private *file_priv = file->driver_priv;
770 struct i915_ppgtt *ppgtt;
773 if (!HAS_FULL_PPGTT(i915))
779 ppgtt = i915_ppgtt_create(i915);
781 return PTR_ERR(ppgtt);
783 ppgtt->vm.file = file_priv;
785 if (args->extensions) {
786 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
793 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
797 err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
801 GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
803 mutex_unlock(&file_priv->vm_idr_lock);
809 mutex_unlock(&file_priv->vm_idr_lock);
811 i915_vm_put(&ppgtt->vm);
815 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
816 struct drm_file *file)
818 struct drm_i915_file_private *file_priv = file->driver_priv;
819 struct drm_i915_gem_vm_control *args = data;
820 struct i915_address_space *vm;
827 if (args->extensions)
834 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
838 vm = idr_remove(&file_priv->vm_idr, id);
840 mutex_unlock(&file_priv->vm_idr_lock);
848 struct context_barrier_task {
849 struct i915_active base;
850 void (*task)(void *data);
854 static void cb_retire(struct i915_active *base)
856 struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
861 i915_active_fini(&cb->base);
865 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
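/*
 * Emit a request on every matching, non-skipped engine of the context and run
 * @task once all of those requests have retired, i.e. once no engine can
 * still be using the old state. This is used below to serialise vm changes
 * against requests already in flight.
 */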
866 static int context_barrier_task(struct i915_gem_context *ctx,
867 intel_engine_mask_t engines,
868 bool (*skip)(struct intel_context *ce, void *data),
869 int (*emit)(struct i915_request *rq, void *data),
870 void (*task)(void *data),
873 struct drm_i915_private *i915 = ctx->i915;
874 struct context_barrier_task *cb;
875 struct i915_gem_engines_iter it;
876 struct intel_context *ce;
879 lockdep_assert_held(&i915->drm.struct_mutex);
882 cb = kmalloc(sizeof(*cb), GFP_KERNEL);
886 i915_active_init(i915, &cb->base, NULL, cb_retire);
887 err = i915_active_acquire(&cb->base);
893 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
894 struct i915_request *rq;
896 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
902 if (!(ce->engine->mask & engines))
905 if (skip && skip(ce, data))
908 rq = intel_context_create_request(ce);
916 err = emit(rq, data);
918 err = i915_active_ref(&cb->base, rq->timeline, rq);
920 i915_request_add(rq);
924 i915_gem_context_unlock_engines(ctx);
926 cb->task = err ? NULL : task; /* caller needs to unwind instead */
929 i915_active_release(&cb->base);
934 static int get_ppgtt(struct drm_i915_file_private *file_priv,
935 struct i915_gem_context *ctx,
936 struct drm_i915_gem_context_param *args)
938 struct i915_address_space *vm;
944 /* XXX rcu acquire? */
945 ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
949 vm = i915_vm_get(ctx->vm);
950 mutex_unlock(&ctx->i915->drm.struct_mutex);
952 ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
956 ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
968 mutex_unlock(&file_priv->vm_idr_lock);
974 static void set_ppgtt_barrier(void *data)
976 struct i915_address_space *old = data;
978 if (INTEL_GEN(old->i915) < 8)
979 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
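/*
 * emit() callback for the ppgtt barrier: point the hardware at the new page
 * directories. On 4-level ppGTT only the root PDP is reloaded, on 3-level
 * execlists platforms all four PDP entries are rewritten, and on older
 * ringbuffer platforms nothing is emitted beyond pinning, as the ppGTT is not
 * part of the legacy context image and is programmed separately.
 */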
984 static int emit_ppgtt_update(struct i915_request *rq, void *data)
986 struct i915_address_space *vm = rq->hw_context->vm;
987 struct intel_engine_cs *engine = rq->engine;
988 u32 base = engine->mmio_base;
992 if (i915_vm_is_4lvl(vm)) {
993 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
994 const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
996 cs = intel_ring_begin(rq, 6);
1000 *cs++ = MI_LOAD_REGISTER_IMM(2);
1002 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1003 *cs++ = upper_32_bits(pd_daddr);
1004 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1005 *cs++ = lower_32_bits(pd_daddr);
1008 intel_ring_advance(rq, cs);
1009 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1010 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1012 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1016 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
1017 for (i = GEN8_3LVL_PDPES; i--; ) {
1018 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1020 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1021 *cs++ = upper_32_bits(pd_daddr);
1022 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1023 *cs++ = lower_32_bits(pd_daddr);
1026 intel_ring_advance(rq, cs);
1028 /* ppGTT is not part of the legacy context image */
1029 gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
1035 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1037 if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1040 return !atomic_read(&ce->pin_count);
1043 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1044 struct i915_gem_context *ctx,
1045 struct drm_i915_gem_context_param *args)
1047 struct i915_address_space *vm, *old;
1056 if (upper_32_bits(args->value))
1059 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1063 vm = idr_find(&file_priv->vm_idr, args->value);
1066 mutex_unlock(&file_priv->vm_idr_lock);
1070 err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
1077 /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1078 mutex_lock(&ctx->mutex);
1080 mutex_unlock(&ctx->mutex);
1082 old = __set_ppgtt(ctx, vm);
1085 * We need to flush any requests using the current ppgtt before
1086 * we release it as the requests do not hold a reference themselves,
1087 * only indirectly through the context.
1089 err = context_barrier_task(ctx, ALL_ENGINES,
1095 i915_vm_put(__set_ppgtt(ctx, old));
1100 mutex_unlock(&ctx->i915->drm.struct_mutex);
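/*
 * Write the new R_PWR_CLK_STATE value directly into the target context's
 * saved register state (its LRC state page) via a GGTT store, so the
 * configuration takes effect the next time that context image is loaded.
 */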
1107 static int gen8_emit_rpcs_config(struct i915_request *rq,
1108 struct intel_context *ce,
1109 struct intel_sseu sseu)
1114 cs = intel_ring_begin(rq, 4);
1118 offset = i915_ggtt_offset(ce->state) +
1119 LRC_STATE_PN * PAGE_SIZE +
1120 (CTX_R_PWR_CLK_STATE + 1) * 4;
1122 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1123 *cs++ = lower_32_bits(offset);
1124 *cs++ = upper_32_bits(offset);
1125 *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1127 intel_ring_advance(rq, cs);
1133 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1135 struct i915_request *rq;
1138 lockdep_assert_held(&ce->pin_mutex);
1141 * If the context is not idle, we have to submit an ordered request to
1142 * modify its context image via the kernel context (writing to our own
	 * image, or into the registers directly, does not stick). Pristine
1144 * and idle contexts will be configured on pinning.
1146 if (!intel_context_is_pinned(ce))
1149 rq = i915_request_create(ce->engine->kernel_context);
1153 /* Serialise with the remote context */
1154 ret = intel_context_prepare_remote_request(ce, rq);
1156 ret = gen8_emit_rpcs_config(rq, ce, sseu);
1158 i915_request_add(rq);
1163 __intel_context_reconfigure_sseu(struct intel_context *ce,
1164 struct intel_sseu sseu)
1168 GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
1170 ret = intel_context_lock_pinned(ce);
1174 /* Nothing to do if unmodified. */
1175 if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1178 ret = gen8_modify_rpcs(ce, sseu);
1183 intel_context_unlock_pinned(ce);
1188 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1190 struct drm_i915_private *i915 = ce->engine->i915;
1193 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1197 ret = __intel_context_reconfigure_sseu(ce, sseu);
1199 mutex_unlock(&i915->drm.struct_mutex);
1205 user_to_context_sseu(struct drm_i915_private *i915,
1206 const struct drm_i915_gem_context_param_sseu *user,
1207 struct intel_sseu *context)
1209 const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1211 /* No zeros in any field. */
1212 if (!user->slice_mask || !user->subslice_mask ||
1213 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1217 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1221 * Some future proofing on the types since the uAPI is wider than the
1222 * current internal implementation.
1224 if (overflows_type(user->slice_mask, context->slice_mask) ||
1225 overflows_type(user->subslice_mask, context->subslice_mask) ||
1226 overflows_type(user->min_eus_per_subslice,
1227 context->min_eus_per_subslice) ||
1228 overflows_type(user->max_eus_per_subslice,
1229 context->max_eus_per_subslice))
1232 /* Check validity against hardware. */
1233 if (user->slice_mask & ~device->slice_mask)
1236 if (user->subslice_mask & ~device->subslice_mask[0])
1239 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1242 context->slice_mask = user->slice_mask;
1243 context->subslice_mask = user->subslice_mask;
1244 context->min_eus_per_subslice = user->min_eus_per_subslice;
1245 context->max_eus_per_subslice = user->max_eus_per_subslice;
1247 /* Part specific restrictions. */
1248 if (IS_GEN(i915, 11)) {
1249 unsigned int hw_s = hweight8(device->slice_mask);
1250 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1251 unsigned int req_s = hweight8(context->slice_mask);
1252 unsigned int req_ss = hweight8(context->subslice_mask);
1255 * Only full subslice enablement is possible if more than one
1256 * slice is turned on.
1258 if (req_s > 1 && req_ss != hw_ss_per_s)
1262 * If more than four (SScount bitfield limit) subslices are
1263 * requested then the number has to be even.
1265 if (req_ss > 4 && (req_ss & 1))
1269 * If only one slice is enabled and subslice count is below the
1270 * device full enablement, it must be at most half of the all
1271 * available subslices.
1273 if (req_s == 1 && req_ss < hw_ss_per_s &&
1274 req_ss > (hw_ss_per_s / 2))
1277 /* ABI restriction - VME use case only. */
1279 /* All slices or one slice only. */
1280 if (req_s != 1 && req_s != hw_s)
1284 * Half subslices or full enablement only when one slice is
1288 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1291 /* No EU configuration changes. */
1292 if ((user->min_eus_per_subslice !=
1293 device->max_eus_per_subslice) ||
1294 (user->max_eus_per_subslice !=
1295 device->max_eus_per_subslice))
1302 static int set_sseu(struct i915_gem_context *ctx,
1303 struct drm_i915_gem_context_param *args)
1305 struct drm_i915_private *i915 = ctx->i915;
1306 struct drm_i915_gem_context_param_sseu user_sseu;
1307 struct intel_context *ce;
1308 struct intel_sseu sseu;
1309 unsigned long lookup;
1312 if (args->size < sizeof(user_sseu))
1315 if (!IS_GEN(i915, 11))
1318 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1325 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1329 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1330 lookup |= LOOKUP_USER_INDEX;
1332 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1336 /* Only render engine supports RPCS configuration. */
1337 if (ce->engine->class != RENDER_CLASS) {
1342 ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1346 ret = intel_context_reconfigure_sseu(ce, sseu);
1350 args->size = sizeof(user_sseu);
1353 intel_context_put(ce);
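/*
 * A rough sketch of how userspace would install a custom engine map via
 * I915_CONTEXT_PARAM_ENGINES (uAPI structs from i915_drm.h; a two-engine map,
 * an already-created ctx_id and an open DRM fd are assumed):
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.engines = {
 *			{ .engine_class = I915_ENGINE_CLASS_RENDER, .engine_instance = 0 },
 *			{ .engine_class = I915_ENGINE_CLASS_COPY, .engine_instance = 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (uintptr_t)&engines,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */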
1357 struct set_engines {
1358 struct i915_gem_context *ctx;
1359 struct i915_gem_engines *engines;
1363 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1365 struct i915_context_engines_load_balance __user *ext =
1366 container_of_user(base, typeof(*ext), base);
1367 const struct set_engines *set = data;
1368 struct intel_engine_cs *stack[16];
1369 struct intel_engine_cs **siblings;
1370 struct intel_context *ce;
1371 u16 num_siblings, idx;
1375 if (!HAS_EXECLISTS(set->ctx->i915))
1378 if (USES_GUC_SUBMISSION(set->ctx->i915))
		return -ENODEV; /* not implemented yet */
1381 if (get_user(idx, &ext->engine_index))
1384 if (idx >= set->engines->num_engines) {
1385 DRM_DEBUG("Invalid placement value, %d >= %d\n",
1386 idx, set->engines->num_engines);
1390 idx = array_index_nospec(idx, set->engines->num_engines);
1391 if (set->engines->engines[idx]) {
1392 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
1396 if (get_user(num_siblings, &ext->num_siblings))
1399 err = check_user_mbz(&ext->flags);
1403 err = check_user_mbz(&ext->mbz64);
1408 if (num_siblings > ARRAY_SIZE(stack)) {
1409 siblings = kmalloc_array(num_siblings,
1416 for (n = 0; n < num_siblings; n++) {
1417 struct i915_engine_class_instance ci;
1419 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1424 siblings[n] = intel_engine_lookup_user(set->ctx->i915,
1426 ci.engine_instance);
1428 DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
1429 n, ci.engine_class, ci.engine_instance);
1435 ce = intel_execlists_create_virtual(set->ctx, siblings, n);
1441 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1442 intel_context_put(ce);
1448 if (siblings != stack)
1455 set_engines__bond(struct i915_user_extension __user *base, void *data)
1457 struct i915_context_engines_bond __user *ext =
1458 container_of_user(base, typeof(*ext), base);
1459 const struct set_engines *set = data;
1460 struct i915_engine_class_instance ci;
1461 struct intel_engine_cs *virtual;
1462 struct intel_engine_cs *master;
1466 if (get_user(idx, &ext->virtual_index))
1469 if (idx >= set->engines->num_engines) {
1470 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
1471 idx, set->engines->num_engines);
1475 idx = array_index_nospec(idx, set->engines->num_engines);
1476 if (!set->engines->engines[idx]) {
1477 DRM_DEBUG("Invalid engine at %d\n", idx);
1480 virtual = set->engines->engines[idx]->engine;
1482 err = check_user_mbz(&ext->flags);
1486 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1487 err = check_user_mbz(&ext->mbz64[n]);
1492 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1495 master = intel_engine_lookup_user(set->ctx->i915,
1496 ci.engine_class, ci.engine_instance);
1498 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
1499 ci.engine_class, ci.engine_instance);
1503 if (get_user(num_bonds, &ext->num_bonds))
1506 for (n = 0; n < num_bonds; n++) {
1507 struct intel_engine_cs *bond;
1509 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1512 bond = intel_engine_lookup_user(set->ctx->i915,
1514 ci.engine_instance);
1516 DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1517 n, ci.engine_class, ci.engine_instance);
1522 * A non-virtual engine has no siblings to choose between; and
1523 * a submit fence will always be directed to the one engine.
1525 if (intel_engine_is_virtual(virtual)) {
1526 err = intel_virtual_engine_attach_bond(virtual,
1537 static const i915_user_extension_fn set_engines__extensions[] = {
1538 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1539 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1543 set_engines(struct i915_gem_context *ctx,
1544 const struct drm_i915_gem_context_param *args)
1546 struct i915_context_param_engines __user *user =
1547 u64_to_user_ptr(args->value);
1548 struct set_engines set = { .ctx = ctx };
1549 unsigned int num_engines, n;
1553 if (!args->size) { /* switch back to legacy user_ring_map */
1554 if (!i915_gem_context_user_engines(ctx))
1557 set.engines = default_engines(ctx);
1558 if (IS_ERR(set.engines))
1559 return PTR_ERR(set.engines);
1564 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1565 if (args->size < sizeof(*user) ||
1566 !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1567 DRM_DEBUG("Invalid size for engine array: %d\n",
1573 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1574 * first 64 engines defined here.
1576 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1578 set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1583 init_rcu_head(&set.engines->rcu);
1584 for (n = 0; n < num_engines; n++) {
1585 struct i915_engine_class_instance ci;
1586 struct intel_engine_cs *engine;
1587 struct intel_context *ce;
1589 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1590 __free_engines(set.engines, n);
1594 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1595 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1596 set.engines->engines[n] = NULL;
1600 engine = intel_engine_lookup_user(ctx->i915,
1602 ci.engine_instance);
1604 DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1605 n, ci.engine_class, ci.engine_instance);
1606 __free_engines(set.engines, n);
1610 ce = intel_context_create(ctx, engine);
1612 __free_engines(set.engines, n);
1616 set.engines->engines[n] = ce;
1618 set.engines->num_engines = num_engines;
1621 if (!get_user(extensions, &user->extensions))
1622 err = i915_user_extensions(u64_to_user_ptr(extensions),
1623 set_engines__extensions,
1624 ARRAY_SIZE(set_engines__extensions),
1627 free_engines(set.engines);
1632 mutex_lock(&ctx->engines_mutex);
1634 i915_gem_context_set_user_engines(ctx);
1636 i915_gem_context_clear_user_engines(ctx);
1637 rcu_swap_protected(ctx->engines, set.engines, 1);
1638 mutex_unlock(&ctx->engines_mutex);
1640 call_rcu(&set.engines->rcu, free_engines_rcu);
1645 static struct i915_gem_engines *
1646 __copy_engines(struct i915_gem_engines *e)
1648 struct i915_gem_engines *copy;
1651 copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1653 return ERR_PTR(-ENOMEM);
1655 init_rcu_head(©->rcu);
1656 for (n = 0; n < e->num_engines; n++) {
1658 copy->engines[n] = intel_context_get(e->engines[n]);
1660 copy->engines[n] = NULL;
1662 copy->num_engines = n;
1668 get_engines(struct i915_gem_context *ctx,
1669 struct drm_i915_gem_context_param *args)
1671 struct i915_context_param_engines __user *user;
1672 struct i915_gem_engines *e;
1673 size_t n, count, size;
1676 err = mutex_lock_interruptible(&ctx->engines_mutex);
1681 if (i915_gem_context_user_engines(ctx))
1682 e = __copy_engines(i915_gem_context_engines(ctx));
1683 mutex_unlock(&ctx->engines_mutex);
1684 if (IS_ERR_OR_NULL(e)) {
1686 return PTR_ERR_OR_ZERO(e);
1689 count = e->num_engines;
1691 /* Be paranoid in case we have an impedance mismatch */
1692 if (!check_struct_size(user, engines, count, &size)) {
1696 if (overflows_type(size, args->size)) {
1706 if (args->size < size) {
1711 user = u64_to_user_ptr(args->value);
1712 if (!access_ok(user, size)) {
1717 if (put_user(0, &user->extensions)) {
1722 for (n = 0; n < count; n++) {
1723 struct i915_engine_class_instance ci = {
1724 .engine_class = I915_ENGINE_CLASS_INVALID,
1725 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1728 if (e->engines[n]) {
1729 ci.engine_class = e->engines[n]->engine->uabi_class;
1730 ci.engine_instance = e->engines[n]->engine->uabi_instance;
1733 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1746 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1747 struct i915_gem_context *ctx,
1748 struct drm_i915_gem_context_param *args)
1752 switch (args->param) {
1753 case I915_CONTEXT_PARAM_NO_ZEROMAP:
1756 else if (args->value)
1757 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1759 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1762 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1765 else if (args->value)
1766 i915_gem_context_set_no_error_capture(ctx);
1768 i915_gem_context_clear_no_error_capture(ctx);
1771 case I915_CONTEXT_PARAM_BANNABLE:
1774 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1776 else if (args->value)
1777 i915_gem_context_set_bannable(ctx);
1779 i915_gem_context_clear_bannable(ctx);
1782 case I915_CONTEXT_PARAM_RECOVERABLE:
1785 else if (args->value)
1786 i915_gem_context_set_recoverable(ctx);
1788 i915_gem_context_clear_recoverable(ctx);
1791 case I915_CONTEXT_PARAM_PRIORITY:
1793 s64 priority = args->value;
1797 else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1799 else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1800 priority < I915_CONTEXT_MIN_USER_PRIORITY)
1802 else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1803 !capable(CAP_SYS_NICE))
1806 ctx->sched.priority =
1807 I915_USER_PRIORITY(priority);
1811 case I915_CONTEXT_PARAM_SSEU:
1812 ret = set_sseu(ctx, args);
1815 case I915_CONTEXT_PARAM_VM:
1816 ret = set_ppgtt(fpriv, ctx, args);
1819 case I915_CONTEXT_PARAM_ENGINES:
1820 ret = set_engines(ctx, args);
1823 case I915_CONTEXT_PARAM_BAN_PERIOD:
1833 struct i915_gem_context *ctx;
1834 struct drm_i915_file_private *fpriv;
1837 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1839 struct drm_i915_gem_context_create_ext_setparam local;
1840 const struct create_ext *arg = data;
1842 if (copy_from_user(&local, ext, sizeof(local)))
1845 if (local.param.ctx_id)
1848 return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1851 static int clone_engines(struct i915_gem_context *dst,
1852 struct i915_gem_context *src)
1854 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1855 struct i915_gem_engines *clone;
1859 clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1863 init_rcu_head(&clone->rcu);
1864 for (n = 0; n < e->num_engines; n++) {
1865 struct intel_engine_cs *engine;
1867 if (!e->engines[n]) {
1868 clone->engines[n] = NULL;
1871 engine = e->engines[n]->engine;
1874 * Virtual engines are singletons; they can only exist
1875 * inside a single context, because they embed their
1876 * HW context... As each virtual context implies a single
1877 * timeline (each engine can only dequeue a single request
1878 * at any time), it would be surprising for two contexts
1879 * to use the same engine. So let's create a copy of
1880 * the virtual engine instead.
1882 if (intel_engine_is_virtual(engine))
1884 intel_execlists_clone_virtual(dst, engine);
1886 clone->engines[n] = intel_context_create(dst, engine);
1887 if (IS_ERR_OR_NULL(clone->engines[n])) {
1888 __free_engines(clone, n);
1892 clone->num_engines = n;
1894 user_engines = i915_gem_context_user_engines(src);
1895 i915_gem_context_unlock_engines(src);
1897 free_engines(dst->engines);
1898 RCU_INIT_POINTER(dst->engines, clone);
1900 i915_gem_context_set_user_engines(dst);
1902 i915_gem_context_clear_user_engines(dst);
1906 i915_gem_context_unlock_engines(src);
1910 static int clone_flags(struct i915_gem_context *dst,
1911 struct i915_gem_context *src)
1913 dst->user_flags = src->user_flags;
1917 static int clone_schedattr(struct i915_gem_context *dst,
1918 struct i915_gem_context *src)
1920 dst->sched = src->sched;
1924 static int clone_sseu(struct i915_gem_context *dst,
1925 struct i915_gem_context *src)
1927 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1928 struct i915_gem_engines *clone;
1932 clone = dst->engines; /* no locking required; sole access */
1933 if (e->num_engines != clone->num_engines) {
1938 for (n = 0; n < e->num_engines; n++) {
1939 struct intel_context *ce = e->engines[n];
1941 if (clone->engines[n]->engine->class != ce->engine->class) {
1942 /* Must have compatible engine maps! */
1947 /* serialises with set_sseu */
1948 err = intel_context_lock_pinned(ce);
1952 clone->engines[n]->sseu = ce->sseu;
1953 intel_context_unlock_pinned(ce);
1958 i915_gem_context_unlock_engines(src);
1962 static int clone_timeline(struct i915_gem_context *dst,
1963 struct i915_gem_context *src)
1966 __assign_timeline(dst, src->timeline);
1971 static int clone_vm(struct i915_gem_context *dst,
1972 struct i915_gem_context *src)
1974 struct i915_address_space *vm;
1978 vm = READ_ONCE(src->vm);
1982 if (!kref_get_unless_zero(&vm->ref))
		 * This ppgtt may have been reallocated between
1987 * the read and the kref, and reassigned to a third
1988 * context. In order to avoid inadvertent sharing
1989 * of this ppgtt with that third context (and not
1990 * src), we have to confirm that we have the same
1991 * ppgtt after passing through the strong memory
1992 * barrier implied by a successful
1993 * kref_get_unless_zero().
1995 * Once we have acquired the current ppgtt of src,
1996 * we no longer care if it is released from src, as
1997 * it cannot be reallocated elsewhere.
2000 if (vm == READ_ONCE(src->vm))
2008 __assign_ppgtt(dst, vm);
2015 static int create_clone(struct i915_user_extension __user *ext, void *data)
2017 static int (* const fn[])(struct i915_gem_context *dst,
2018 struct i915_gem_context *src) = {
2019 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2020 MAP(ENGINES, clone_engines),
2021 MAP(FLAGS, clone_flags),
2022 MAP(SCHEDATTR, clone_schedattr),
2023 MAP(SSEU, clone_sseu),
2024 MAP(TIMELINE, clone_timeline),
2028 struct drm_i915_gem_context_create_ext_clone local;
2029 const struct create_ext *arg = data;
2030 struct i915_gem_context *dst = arg->ctx;
2031 struct i915_gem_context *src;
2034 if (copy_from_user(&local, ext, sizeof(local)))
2037 BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2038 I915_CONTEXT_CLONE_UNKNOWN);
2040 if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2047 src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2052 GEM_BUG_ON(src == dst);
2054 for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2055 if (!(local.flags & BIT(bit)))
2058 err = fn[bit](dst, src);
2066 static const i915_user_extension_fn create_extensions[] = {
2067 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2068 [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2071 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2073 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2076 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2077 struct drm_file *file)
2079 struct drm_i915_private *i915 = to_i915(dev);
2080 struct drm_i915_gem_context_create_ext *args = data;
2081 struct create_ext ext_data;
2084 if (!DRIVER_CAPS(i915)->has_logical_contexts)
2087 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2090 ret = intel_gt_terminally_wedged(&i915->gt);
2094 ext_data.fpriv = file->driver_priv;
2095 if (client_is_banned(ext_data.fpriv)) {
2096 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
2098 pid_nr(get_task_pid(current, PIDTYPE_PID)));
2102 ret = i915_mutex_lock_interruptible(dev);
2106 ext_data.ctx = i915_gem_create_context(i915, args->flags);
2107 mutex_unlock(&dev->struct_mutex);
2108 if (IS_ERR(ext_data.ctx))
2109 return PTR_ERR(ext_data.ctx);
2111 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2112 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2114 ARRAY_SIZE(create_extensions),
2120 ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
2125 DRM_DEBUG("HW context %d created\n", args->ctx_id);
2130 context_close(ext_data.ctx);
2134 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2135 struct drm_file *file)
2137 struct drm_i915_gem_context_destroy *args = data;
2138 struct drm_i915_file_private *file_priv = file->driver_priv;
2139 struct i915_gem_context *ctx;
2147 if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2150 ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2151 mutex_unlock(&file_priv->context_idr_lock);
2159 static int get_sseu(struct i915_gem_context *ctx,
2160 struct drm_i915_gem_context_param *args)
2162 struct drm_i915_gem_context_param_sseu user_sseu;
2163 struct intel_context *ce;
2164 unsigned long lookup;
2167 if (args->size == 0)
2169 else if (args->size < sizeof(user_sseu))
2172 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2179 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2183 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2184 lookup |= LOOKUP_USER_INDEX;
2186 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2190 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2192 intel_context_put(ce);
2196 user_sseu.slice_mask = ce->sseu.slice_mask;
2197 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2198 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2199 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2201 intel_context_unlock_pinned(ce);
2202 intel_context_put(ce);
2204 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2209 args->size = sizeof(user_sseu);
2214 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2215 struct drm_file *file)
2217 struct drm_i915_file_private *file_priv = file->driver_priv;
2218 struct drm_i915_gem_context_param *args = data;
2219 struct i915_gem_context *ctx;
2222 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2226 switch (args->param) {
2227 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2229 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2232 case I915_CONTEXT_PARAM_GTT_SIZE:
2235 args->value = ctx->vm->total;
2236 else if (to_i915(dev)->ggtt.alias)
2237 args->value = to_i915(dev)->ggtt.alias->vm.total;
2239 args->value = to_i915(dev)->ggtt.vm.total;
2242 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2244 args->value = i915_gem_context_no_error_capture(ctx);
2247 case I915_CONTEXT_PARAM_BANNABLE:
2249 args->value = i915_gem_context_is_bannable(ctx);
2252 case I915_CONTEXT_PARAM_RECOVERABLE:
2254 args->value = i915_gem_context_is_recoverable(ctx);
2257 case I915_CONTEXT_PARAM_PRIORITY:
2259 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2262 case I915_CONTEXT_PARAM_SSEU:
2263 ret = get_sseu(ctx, args);
2266 case I915_CONTEXT_PARAM_VM:
2267 ret = get_ppgtt(file_priv, ctx, args);
2270 case I915_CONTEXT_PARAM_ENGINES:
2271 ret = get_engines(ctx, args);
2274 case I915_CONTEXT_PARAM_BAN_PERIOD:
2280 i915_gem_context_put(ctx);
2284 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2285 struct drm_file *file)
2287 struct drm_i915_file_private *file_priv = file->driver_priv;
2288 struct drm_i915_gem_context_param *args = data;
2289 struct i915_gem_context *ctx;
2292 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2296 ret = ctx_setparam(file_priv, ctx, args);
2298 i915_gem_context_put(ctx);
2302 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2303 void *data, struct drm_file *file)
2305 struct drm_i915_private *dev_priv = to_i915(dev);
2306 struct drm_i915_reset_stats *args = data;
2307 struct i915_gem_context *ctx;
2310 if (args->flags || args->pad)
2315 ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2320 * We opt for unserialised reads here. This may result in tearing
2321 * in the extremely unlikely event of a GPU hang on this context
2322 * as we are querying them. If we need that extra layer of protection,
2323 * we should wrap the hangstats with a seqlock.
2326 if (capable(CAP_SYS_ADMIN))
2327 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
2329 args->reset_count = 0;
2331 args->batch_active = atomic_read(&ctx->guilty_count);
2332 args->batch_pending = atomic_read(&ctx->active_count);
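	/*
	 * A minimal sketch of that seqlock variant, assuming a hypothetical
	 * seqlock_t hangstats_lock were added to the context and taken for
	 * write wherever guilty_count/active_count are updated:
	 *
	 *	unsigned int seq;
	 *	do {
	 *		seq = read_seqbegin(&ctx->hangstats_lock);
	 *		args->batch_active = atomic_read(&ctx->guilty_count);
	 *		args->batch_pending = atomic_read(&ctx->active_count);
	 *	} while (read_seqretry(&ctx->hangstats_lock, seq));
	 */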
2340 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
2342 struct drm_i915_private *i915 = ctx->i915;
2345 mutex_lock(&i915->contexts.mutex);
2347 GEM_BUG_ON(i915_gem_context_is_closed(ctx));
2349 if (list_empty(&ctx->hw_id_link)) {
2350 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
2352 err = assign_hw_id(i915, &ctx->hw_id);
2356 list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
2359 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
2360 atomic_inc(&ctx->hw_id_pin_count);
2363 mutex_unlock(&i915->contexts.mutex);
2367 /* GEM context-engines iterator: for_each_gem_engine() */
2368 struct intel_context *
2369 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2371 const struct i915_gem_engines *e = it->engines;
2372 struct intel_context *ctx;
2375 if (it->idx >= e->num_engines)
2378 ctx = e->engines[it->idx++];
2384 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2385 #include "selftests/mock_context.c"
2386 #include "selftests/i915_gem_context.c"
2389 static void i915_global_gem_context_shrink(void)
2391 kmem_cache_shrink(global.slab_luts);
2394 static void i915_global_gem_context_exit(void)
2396 kmem_cache_destroy(global.slab_luts);
2399 static struct i915_global_gem_context global = { {
2400 .shrink = i915_global_gem_context_shrink,
2401 .exit = i915_global_gem_context_exit,
2404 int __init i915_global_gem_context_init(void)
2406 global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2407 if (!global.slab_luts)
2410 i915_global_register(&global.base);