drivers/gpu/drm/i915/gem/i915_gem_context.c [linux-block.git]
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2011-2012 Intel Corporation
5  */
6
7 /*
8  * This file implements HW context support. On gen5+ a HW context consists of an
9  * opaque GPU object which is referenced at times of context saves and restores.
10  * With RC6 enabled, the context is also referenced as the GPU enters and exits
11  * RC6 (the GPU has its own internal power context, except on gen5). Though
12  * something like a context does exist for the media ring, the code only
13  * supports contexts for the render ring.
14  *
15  * In software, there is a distinction between contexts created by the user,
16  * and the default HW context. The default HW context is used by GPU clients
17  * that do not request setup of their own hardware context. The default
18  * context's state is never restored to help prevent programming errors. This
19  * would happen if a client ran and piggy-backed off another client's GPU state.
20  * The default context only exists to give the GPU some context to load as
21  * the current one, forcing a save of the context we actually care about. In fact, the
22  * code could likely be constructed, albeit in a more complicated fashion, to
23  * never use the default context, though that limits the driver's ability to
24  * swap out, and/or destroy other contexts.
25  *
26  * All other contexts are created as a request by the GPU client. These contexts
27  * store GPU state, and thus allow GPU clients to not re-emit state (and
28  * potentially query certain state) at any time. The kernel driver makes
29  * certain that the appropriate commands are inserted.
30  *
31  * The context life cycle is semi-complicated in that context BOs may live
32  * longer than the context itself because of the way the hardware, and object
33  * tracking works. Below is a very crude representation of the state machine
34  * describing the context life.
35  *                                         refcount     pincount     active
36  * S0: initial state                          0            0           0
37  * S1: context created                        1            0           0
38  * S2: context is currently running           2            1           X
39  * S3: GPU referenced, but not current        2            0           1
40  * S4: context is current, but destroyed      1            1           0
41  * S5: like S3, but destroyed                 1            0           1
42  *
43  * The most common (but not all) transitions:
44  * S0->S1: client creates a context
45  * S1->S2: client submits execbuf with context
46  * S2->S3: another client submits an execbuf with a different context
47  * S3->S1: context object was retired
48  * S3->S2: client submits another execbuf
49  * S2->S4: context destroy called with current context
50  * S3->S5->S0: destroy path
51  * S4->S5->S0: destroy path on current context
52  *
53  * There are two confusing terms used above:
54  *  The "current context" means the context which is currently running on the
55  *  GPU. The GPU has loaded its state already and has stored away the gtt
56  *  offset of the BO. The GPU is not actively referencing the data at this
57  *  offset, but it will on the next context switch. The only way to avoid this
58  *  is to do a GPU reset.
59  *
60  *  An "active context' is one which was previously the "current context" and is
61  *  on the active list waiting for the next context switch to occur. Until this
62  *  happens, the object must remain at the same gtt offset. It is therefore
63  * possible for a context to be destroyed while it is still active.
64  *
65  */
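/*
 * A rough userspace-side sketch of the transitions above. This is purely
 * illustrative and not part of this file; the authoritative uAPI definitions
 * live in include/uapi/drm/i915_drm.h:
 *
 *	struct drm_i915_gem_context_create create = {};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);         (S0->S1)
 *
 *	struct drm_i915_gem_execbuffer2 eb = { ... };
 *	i915_execbuffer2_set_context_id(eb, create.ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);                 (S1->S2)
 *
 *	struct drm_i915_gem_context_destroy destroy = { create.ctx_id };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);        (S2->S4 or S3->S5)
 */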
66
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69
70 #include "gt/gen6_ppgtt.h"
71 #include "gt/intel_context.h"
72 #include "gt/intel_context_param.h"
73 #include "gt/intel_engine_heartbeat.h"
74 #include "gt/intel_engine_user.h"
75 #include "gt/intel_ring.h"
76
77 #include "i915_gem_context.h"
78 #include "i915_globals.h"
79 #include "i915_trace.h"
80 #include "i915_user_extensions.h"
81
82 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
83
84 static struct i915_global_gem_context {
85         struct i915_global base;
86         struct kmem_cache *slab_luts;
87 } global;
88
89 struct i915_lut_handle *i915_lut_handle_alloc(void)
90 {
91         return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
92 }
93
94 void i915_lut_handle_free(struct i915_lut_handle *lut)
95 {
96         return kmem_cache_free(global.slab_luts, lut);
97 }
98
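/*
 * Tear down the handle->vma lookup cache for this context: walk every slot
 * in ctx->handles_vma, unlink the lut entries that belong to this context
 * and drop the references they held on the vma and its object.
 */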
99 static void lut_close(struct i915_gem_context *ctx)
100 {
101         struct radix_tree_iter iter;
102         void __rcu **slot;
103
104         lockdep_assert_held(&ctx->mutex);
105
106         rcu_read_lock();
107         radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
108                 struct i915_vma *vma = rcu_dereference_raw(*slot);
109                 struct drm_i915_gem_object *obj = vma->obj;
110                 struct i915_lut_handle *lut;
111
112                 if (!kref_get_unless_zero(&obj->base.refcount))
113                         continue;
114
115                 rcu_read_unlock();
116                 i915_gem_object_lock(obj);
117                 list_for_each_entry(lut, &obj->lut_list, obj_link) {
118                         if (lut->ctx != ctx)
119                                 continue;
120
121                         if (lut->handle != iter.index)
122                                 continue;
123
124                         list_del(&lut->obj_link);
125                         break;
126                 }
127                 i915_gem_object_unlock(obj);
128                 rcu_read_lock();
129
130                 if (&lut->obj_link != &obj->lut_list) {
131                         i915_lut_handle_free(lut);
132                         radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
133                         i915_vma_close(vma);
134                         i915_gem_object_put(obj);
135                 }
136
137                 i915_gem_object_put(obj);
138         }
139         rcu_read_unlock();
140 }
141
142 static struct intel_context *
143 lookup_user_engine(struct i915_gem_context *ctx,
144                    unsigned long flags,
145                    const struct i915_engine_class_instance *ci)
146 #define LOOKUP_USER_INDEX BIT(0)
147 {
148         int idx;
149
150         if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
151                 return ERR_PTR(-EINVAL);
152
153         if (!i915_gem_context_user_engines(ctx)) {
154                 struct intel_engine_cs *engine;
155
156                 engine = intel_engine_lookup_user(ctx->i915,
157                                                   ci->engine_class,
158                                                   ci->engine_instance);
159                 if (!engine)
160                         return ERR_PTR(-EINVAL);
161
162                 idx = engine->legacy_idx;
163         } else {
164                 idx = ci->engine_instance;
165         }
166
167         return i915_gem_context_get_engine(ctx, idx);
168 }
169
170 static struct i915_address_space *
171 context_get_vm_rcu(struct i915_gem_context *ctx)
172 {
173         GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
174
175         do {
176                 struct i915_address_space *vm;
177
178                 /*
179                  * We do not allow downgrading from full-ppgtt [to a shared
180                  * global gtt], so ctx->vm cannot become NULL.
181                  */
182                 vm = rcu_dereference(ctx->vm);
183                 if (!kref_get_unless_zero(&vm->ref))
184                         continue;
185
186                 /*
187                  * This ppgtt may have been reallocated between
188                  * the read and the kref, and reassigned to a third
189                  * context. In order to avoid inadvertent sharing
190                  * of this ppgtt with that third context (and not
191                  * this ctx), we have to confirm that we have the same
192                  * ppgtt after passing through the strong memory
193                  * barrier implied by a successful
194                  * kref_get_unless_zero().
195                  *
196                  * Once we have acquired the current ppgtt of ctx,
197                  * we no longer care if it is released from ctx, as
198                  * it cannot be reallocated elsewhere.
199                  */
200
201                 if (vm == rcu_access_pointer(ctx->vm))
202                         return rcu_pointer_handoff(vm);
203
204                 i915_vm_put(vm);
205         } while (1);
206 }
207
208 static void intel_context_set_gem(struct intel_context *ce,
209                                   struct i915_gem_context *ctx)
210 {
211         GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
212         RCU_INIT_POINTER(ce->gem_context, ctx);
213
214         if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
215                 ce->ring = __intel_context_ring_size(SZ_16K);
216
217         if (rcu_access_pointer(ctx->vm)) {
218                 struct i915_address_space *vm;
219
220                 rcu_read_lock();
221                 vm = context_get_vm_rcu(ctx); /* hmm */
222                 rcu_read_unlock();
223
224                 i915_vm_put(ce->vm);
225                 ce->vm = vm;
226         }
227
228         GEM_BUG_ON(ce->timeline);
229         if (ctx->timeline)
230                 ce->timeline = intel_timeline_get(ctx->timeline);
231
232         if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
233             intel_engine_has_timeslices(ce->engine))
234                 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
235 }
236
237 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
238 {
239         while (count--) {
240                 if (!e->engines[count])
241                         continue;
242
243                 intel_context_put(e->engines[count]);
244         }
245         kfree(e);
246 }
247
248 static void free_engines(struct i915_gem_engines *e)
249 {
250         __free_engines(e, e->num_engines);
251 }
252
253 static void free_engines_rcu(struct rcu_head *rcu)
254 {
255         struct i915_gem_engines *engines =
256                 container_of(rcu, struct i915_gem_engines, rcu);
257
258         i915_sw_fence_fini(&engines->fence);
259         free_engines(engines);
260 }
261
262 static int __i915_sw_fence_call
263 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
264 {
265         struct i915_gem_engines *engines =
266                 container_of(fence, typeof(*engines), fence);
267
268         switch (state) {
269         case FENCE_COMPLETE:
270                 if (!list_empty(&engines->link)) {
271                         struct i915_gem_context *ctx = engines->ctx;
272                         unsigned long flags;
273
274                         spin_lock_irqsave(&ctx->stale.lock, flags);
275                         list_del(&engines->link);
276                         spin_unlock_irqrestore(&ctx->stale.lock, flags);
277                 }
278                 i915_gem_context_put(engines->ctx);
279                 break;
280
281         case FENCE_FREE:
282                 init_rcu_head(&engines->rcu);
283                 call_rcu(&engines->rcu, free_engines_rcu);
284                 break;
285         }
286
287         return NOTIFY_DONE;
288 }
289
290 static struct i915_gem_engines *alloc_engines(unsigned int count)
291 {
292         struct i915_gem_engines *e;
293
294         e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
295         if (!e)
296                 return NULL;
297
298         i915_sw_fence_init(&e->fence, engines_notify);
299         return e;
300 }
301
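/*
 * Construct the legacy engine map for a new context: one intel_context per
 * physical engine that exposes a legacy index, stored at that index.
 */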
302 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
303 {
304         const struct intel_gt *gt = &ctx->i915->gt;
305         struct intel_engine_cs *engine;
306         struct i915_gem_engines *e;
307         enum intel_engine_id id;
308
309         e = alloc_engines(I915_NUM_ENGINES);
310         if (!e)
311                 return ERR_PTR(-ENOMEM);
312
313         for_each_engine(engine, gt, id) {
314                 struct intel_context *ce;
315
316                 if (engine->legacy_idx == INVALID_ENGINE)
317                         continue;
318
319                 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
320                 GEM_BUG_ON(e->engines[engine->legacy_idx]);
321
322                 ce = intel_context_create(engine);
323                 if (IS_ERR(ce)) {
324                         __free_engines(e, e->num_engines + 1);
325                         return ERR_CAST(ce);
326                 }
327
328                 intel_context_set_gem(ce, ctx);
329
330                 e->engines[engine->legacy_idx] = ce;
331                 e->num_engines = max(e->num_engines, engine->legacy_idx);
332         }
333         e->num_engines++;
334
335         return e;
336 }
337
338 static void i915_gem_context_free(struct i915_gem_context *ctx)
339 {
340         GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
341
342         spin_lock(&ctx->i915->gem.contexts.lock);
343         list_del(&ctx->link);
344         spin_unlock(&ctx->i915->gem.contexts.lock);
345
346         mutex_destroy(&ctx->engines_mutex);
347
348         if (ctx->timeline)
349                 intel_timeline_put(ctx->timeline);
350
351         put_pid(ctx->pid);
352         mutex_destroy(&ctx->mutex);
353
354         kfree_rcu(ctx, rcu);
355 }
356
357 static void contexts_free_all(struct llist_node *list)
358 {
359         struct i915_gem_context *ctx, *cn;
360
361         llist_for_each_entry_safe(ctx, cn, list, free_link)
362                 i915_gem_context_free(ctx);
363 }
364
365 static void contexts_flush_free(struct i915_gem_contexts *gc)
366 {
367         contexts_free_all(llist_del_all(&gc->free_list));
368 }
369
370 static void contexts_free_worker(struct work_struct *work)
371 {
372         struct i915_gem_contexts *gc =
373                 container_of(work, typeof(*gc), free_work);
374
375         contexts_flush_free(gc);
376 }
377
378 void i915_gem_context_release(struct kref *ref)
379 {
380         struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
381         struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
382
383         trace_i915_context_free(ctx);
384         if (llist_add(&ctx->free_link, &gc->free_list))
385                 schedule_work(&gc->free_work);
386 }
387
388 static inline struct i915_gem_engines *
389 __context_engines_static(const struct i915_gem_context *ctx)
390 {
391         return rcu_dereference_protected(ctx->engines, true);
392 }
393
394 static bool __reset_engine(struct intel_engine_cs *engine)
395 {
396         struct intel_gt *gt = engine->gt;
397         bool success = false;
398
399         if (!intel_has_reset_engine(gt))
400                 return false;
401
402         if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
403                               &gt->reset.flags)) {
404                 success = intel_engine_reset(engine, NULL) == 0;
405                 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
406                                       &gt->reset.flags);
407         }
408
409         return success;
410 }
411
412 static void __reset_context(struct i915_gem_context *ctx,
413                             struct intel_engine_cs *engine)
414 {
415         intel_gt_handle_error(engine->gt, engine->mask, 0,
416                               "context closure in %s", ctx->name);
417 }
418
419 static bool __cancel_engine(struct intel_engine_cs *engine)
420 {
421         /*
422          * Send a "high priority pulse" down the engine to cause the
423          * current request to be momentarily preempted. (If it fails to
424          * be preempted, it will be reset). As we have marked our context
425          * as banned, any incomplete request, including any already running, will
426          * be skipped following the preemption.
427          *
428          * If there is no hangchecking (one of the reasons why we try to
429          * cancel the context) and no forced preemption, there may be no
430          * means by which we reset the GPU and evict the persistent hog.
431          * Ergo if we are unable to inject a preemptive pulse that can
432          * kill the banned context, we fallback to doing a local reset
433          * instead.
434          */
435         if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
436             !intel_engine_pulse(engine))
437                 return true;
438
439         /* If we are unable to send a pulse, try resetting this engine. */
440         return __reset_engine(engine);
441 }
442
443 static struct intel_engine_cs *__active_engine(struct i915_request *rq)
444 {
445         struct intel_engine_cs *engine, *locked;
446
447         /*
448          * Serialise with __i915_request_submit() so that it either sees
449          * the is-banned flag, or we know the request is already inflight.
450          */
451         locked = READ_ONCE(rq->engine);
452         spin_lock_irq(&locked->active.lock);
453         while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
454                 spin_unlock(&locked->active.lock);
455                 spin_lock(&engine->active.lock);
456                 locked = engine;
457         }
458
459         engine = NULL;
460         if (i915_request_is_active(rq) && rq->fence.error != -EIO)
461                 engine = rq->engine;
462
463         spin_unlock_irq(&locked->active.lock);
464
465         return engine;
466 }
467
468 static struct intel_engine_cs *active_engine(struct intel_context *ce)
469 {
470         struct intel_engine_cs *engine = NULL;
471         struct i915_request *rq;
472
473         if (!ce->timeline)
474                 return NULL;
475
476         mutex_lock(&ce->timeline->mutex);
477         list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
478                 if (i915_request_completed(rq))
479                         break;
480
481                 /* Check with the backend if the request is inflight */
482                 engine = __active_engine(rq);
483                 if (engine)
484                         break;
485         }
486         mutex_unlock(&ce->timeline->mutex);
487
488         return engine;
489 }
490
491 static void kill_engines(struct i915_gem_engines *engines)
492 {
493         struct i915_gem_engines_iter it;
494         struct intel_context *ce;
495
496         /*
497          * Map the user's engine back to the actual engines; one virtual
498          * engine will be mapped to multiple engines, and using ctx->engine[]
499          * the same engine may have multiple instances in the user's map.
500          * However, we only care about pending requests, so only include
501          * engines on which there are incomplete requests.
502          */
503         for_each_gem_engine(ce, engines, it) {
504                 struct intel_engine_cs *engine;
505
506                 if (intel_context_set_banned(ce))
507                         continue;
508
509                 /*
510                  * Check the current active state of this context; if we
511                  * are currently executing on the GPU we need to evict
512                  * ourselves. On the other hand, if we haven't yet been
513                  * submitted to the GPU or if everything is complete,
514                  * we have nothing to do.
515                  */
516                 engine = active_engine(ce);
517
518                 /* First attempt to gracefully cancel the context */
519                 if (engine && !__cancel_engine(engine))
520                         /*
521                          * If we are unable to send a preemptive pulse to bump
522                          * the context from the GPU, we have to resort to a full
523                          * reset. We hope the collateral damage is worth it.
524                          */
525                         __reset_context(engines->ctx, engine);
526         }
527 }
528
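/*
 * Walk the context's list of stale engine sets and evict any work still
 * outstanding on them. Each set is pinned via a fence await while the lock
 * is dropped to do the actual killing.
 */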
529 static void kill_stale_engines(struct i915_gem_context *ctx)
530 {
531         struct i915_gem_engines *pos, *next;
532
533         spin_lock_irq(&ctx->stale.lock);
534         GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
535         list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
536                 if (!i915_sw_fence_await(&pos->fence)) {
537                         list_del_init(&pos->link);
538                         continue;
539                 }
540
541                 spin_unlock_irq(&ctx->stale.lock);
542
543                 kill_engines(pos);
544
545                 spin_lock_irq(&ctx->stale.lock);
546                 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
547                 list_safe_reset_next(pos, next, link);
548                 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
549
550                 i915_sw_fence_complete(&pos->fence);
551         }
552         spin_unlock_irq(&ctx->stale.lock);
553 }
554
555 static void kill_context(struct i915_gem_context *ctx)
556 {
557         kill_stale_engines(ctx);
558 }
559
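/*
 * Queue the old engine set for release once it is idle: mark each context
 * as closed so execbuf can no longer use it, await the active trackers on
 * the set's fence, and park it on ctx->stale.engines (or kill it outright
 * if the context already raced to close).
 */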
560 static void engines_idle_release(struct i915_gem_context *ctx,
561                                  struct i915_gem_engines *engines)
562 {
563         struct i915_gem_engines_iter it;
564         struct intel_context *ce;
565
566         INIT_LIST_HEAD(&engines->link);
567
568         engines->ctx = i915_gem_context_get(ctx);
569
570         for_each_gem_engine(ce, engines, it) {
571                 int err;
572
573                 /* serialises with execbuf */
574                 set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
575                 if (!intel_context_pin_if_active(ce))
576                         continue;
577
578                 /* Wait until context is finally scheduled out and retired */
579                 err = i915_sw_fence_await_active(&engines->fence,
580                                                  &ce->active,
581                                                  I915_ACTIVE_AWAIT_BARRIER);
582                 intel_context_unpin(ce);
583                 if (err)
584                         goto kill;
585         }
586
587         spin_lock_irq(&ctx->stale.lock);
588         if (!i915_gem_context_is_closed(ctx))
589                 list_add_tail(&engines->link, &ctx->stale.engines);
590         spin_unlock_irq(&ctx->stale.lock);
591
592 kill:
593         if (list_empty(&engines->link)) /* raced, already closed */
594                 kill_engines(engines);
595
596         i915_sw_fence_commit(&engines->fence);
597 }
598
599 static void set_closed_name(struct i915_gem_context *ctx)
600 {
601         char *s;
602
603         /* Replace '[]' with '<>' to indicate closed in debug prints */
604
605         s = strrchr(ctx->name, '[');
606         if (!s)
607                 return;
608
609         *s = '<';
610
611         s = strchr(s + 1, ']');
612         if (s)
613                 *s = '>';
614 }
615
616 static void context_close(struct i915_gem_context *ctx)
617 {
618         struct i915_address_space *vm;
619
620         /* Flush any concurrent set_engines() */
621         mutex_lock(&ctx->engines_mutex);
622         engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
623         i915_gem_context_set_closed(ctx);
624         mutex_unlock(&ctx->engines_mutex);
625
626         mutex_lock(&ctx->mutex);
627
628         set_closed_name(ctx);
629
630         vm = i915_gem_context_vm(ctx);
631         if (vm)
632                 i915_vm_close(vm);
633
634         ctx->file_priv = ERR_PTR(-EBADF);
635
636         /*
637          * The LUT uses the VMA as a backpointer to unref the object,
638          * so we need to clear the LUT before we close all the VMA (inside
639          * the ppgtt).
640          */
641         lut_close(ctx);
642
643         mutex_unlock(&ctx->mutex);
644
645         /*
646          * If the user has disabled hangchecking, we can not be sure that
647          * the batches will ever complete after the context is closed,
648          * keeping the context and all resources pinned forever. So in this
649          * case we opt to forcibly kill off all remaining requests on
650          * context close.
651          */
652         if (!i915_gem_context_is_persistent(ctx) ||
653             !ctx->i915->params.enable_hangcheck)
654                 kill_context(ctx);
655
656         i915_gem_context_put(ctx);
657 }
658
659 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
660 {
661         if (i915_gem_context_is_persistent(ctx) == state)
662                 return 0;
663
664         if (state) {
665                 /*
666                  * Only contexts that are short-lived [that will expire or be
667                  * reset] are allowed to survive past termination. We require
668                  * hangcheck to ensure that the persistent requests are healthy.
669                  */
670                 if (!ctx->i915->params.enable_hangcheck)
671                         return -EINVAL;
672
673                 i915_gem_context_set_persistence(ctx);
674         } else {
675                 /* To cancel a context we use "preempt-to-idle" */
676                 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
677                         return -ENODEV;
678
679                 /*
680                  * If the cancel fails, we then need to reset, cleanly!
681                  *
682                  * If the per-engine reset fails, all hope is lost! We resort
683                  * to a full GPU reset in that unlikely case, but realistically
684                  * if the engine could not reset, the full reset does not fare
685                  * much better. The damage has been done.
686                  *
687                  * However, if we cannot reset an engine by itself, we cannot
688                  * cleanup a hanging persistent context without causing
689                  * collateral damage, and we should not pretend we can by
690                  * exposing the interface.
691                  */
692                 if (!intel_has_reset_engine(&ctx->i915->gt))
693                         return -ENODEV;
694
695                 i915_gem_context_clear_persistence(ctx);
696         }
697
698         return 0;
699 }
700
701 static struct i915_gem_context *
702 __create_context(struct drm_i915_private *i915)
703 {
704         struct i915_gem_context *ctx;
705         struct i915_gem_engines *e;
706         int err;
707         int i;
708
709         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
710         if (!ctx)
711                 return ERR_PTR(-ENOMEM);
712
713         kref_init(&ctx->ref);
714         ctx->i915 = i915;
715         ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
716         mutex_init(&ctx->mutex);
717
718         spin_lock_init(&ctx->stale.lock);
719         INIT_LIST_HEAD(&ctx->stale.engines);
720
721         mutex_init(&ctx->engines_mutex);
722         e = default_engines(ctx);
723         if (IS_ERR(e)) {
724                 err = PTR_ERR(e);
725                 goto err_free;
726         }
727         RCU_INIT_POINTER(ctx->engines, e);
728
729         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
730
731         /* NB: Mark all slices as needing a remap so that when the context first
732          * loads it will restore whatever remap state already exists. If there
733          * is no remap info, it will be a NOP. */
734         ctx->remap_slice = ALL_L3_SLICES(i915);
735
736         i915_gem_context_set_bannable(ctx);
737         i915_gem_context_set_recoverable(ctx);
738         __context_set_persistence(ctx, true /* cgroup hook? */);
739
740         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
741                 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
742
743         spin_lock(&i915->gem.contexts.lock);
744         list_add_tail(&ctx->link, &i915->gem.contexts.list);
745         spin_unlock(&i915->gem.contexts.lock);
746
747         return ctx;
748
749 err_free:
750         kfree(ctx);
751         return ERR_PTR(err);
752 }
753
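/*
 * Grab a stable reference to the context's current engine set. The
 * returned pointer is kept alive by an await on engines->fence and must be
 * released with i915_sw_fence_complete() once the caller is done.
 */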
754 static inline struct i915_gem_engines *
755 __context_engines_await(const struct i915_gem_context *ctx)
756 {
757         struct i915_gem_engines *engines;
758
759         rcu_read_lock();
760         do {
761                 engines = rcu_dereference(ctx->engines);
762                 GEM_BUG_ON(!engines);
763
764                 if (unlikely(!i915_sw_fence_await(&engines->fence)))
765                         continue;
766
767                 if (likely(engines == rcu_access_pointer(ctx->engines)))
768                         break;
769
770                 i915_sw_fence_complete(&engines->fence);
771         } while (1);
772         rcu_read_unlock();
773
774         return engines;
775 }
776
777 static int
778 context_apply_all(struct i915_gem_context *ctx,
779                   int (*fn)(struct intel_context *ce, void *data),
780                   void *data)
781 {
782         struct i915_gem_engines_iter it;
783         struct i915_gem_engines *e;
784         struct intel_context *ce;
785         int err = 0;
786
787         e = __context_engines_await(ctx);
788         for_each_gem_engine(ce, e, it) {
789                 err = fn(ce, data);
790                 if (err)
791                         break;
792         }
793         i915_sw_fence_complete(&e->fence);
794
795         return err;
796 }
797
798 static int __apply_ppgtt(struct intel_context *ce, void *vm)
799 {
800         i915_vm_put(ce->vm);
801         ce->vm = i915_vm_get(vm);
802         return 0;
803 }
804
805 static struct i915_address_space *
806 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
807 {
808         struct i915_address_space *old;
809
810         old = rcu_replace_pointer(ctx->vm,
811                                   i915_vm_open(vm),
812                                   lockdep_is_held(&ctx->mutex));
813         GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
814
815         context_apply_all(ctx, __apply_ppgtt, vm);
816
817         return old;
818 }
819
820 static void __assign_ppgtt(struct i915_gem_context *ctx,
821                            struct i915_address_space *vm)
822 {
823         if (vm == rcu_access_pointer(ctx->vm))
824                 return;
825
826         vm = __set_ppgtt(ctx, vm);
827         if (vm)
828                 i915_vm_close(vm);
829 }
830
831 static void __set_timeline(struct intel_timeline **dst,
832                            struct intel_timeline *src)
833 {
834         struct intel_timeline *old = *dst;
835
836         *dst = src ? intel_timeline_get(src) : NULL;
837
838         if (old)
839                 intel_timeline_put(old);
840 }
841
842 static int __apply_timeline(struct intel_context *ce, void *timeline)
843 {
844         __set_timeline(&ce->timeline, timeline);
845         return 0;
846 }
847
848 static void __assign_timeline(struct i915_gem_context *ctx,
849                               struct intel_timeline *timeline)
850 {
851         __set_timeline(&ctx->timeline, timeline);
852         context_apply_all(ctx, __apply_timeline, timeline);
853 }
854
855 static struct i915_gem_context *
856 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
857 {
858         struct i915_gem_context *ctx;
859
860         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
861             !HAS_EXECLISTS(i915))
862                 return ERR_PTR(-EINVAL);
863
864         /* Reap the stale contexts */
865         contexts_flush_free(&i915->gem.contexts);
866
867         ctx = __create_context(i915);
868         if (IS_ERR(ctx))
869                 return ctx;
870
871         if (HAS_FULL_PPGTT(i915)) {
872                 struct i915_ppgtt *ppgtt;
873
874                 ppgtt = i915_ppgtt_create(&i915->gt);
875                 if (IS_ERR(ppgtt)) {
876                         drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
877                                 PTR_ERR(ppgtt));
878                         context_close(ctx);
879                         return ERR_CAST(ppgtt);
880                 }
881
882                 mutex_lock(&ctx->mutex);
883                 __assign_ppgtt(ctx, &ppgtt->vm);
884                 mutex_unlock(&ctx->mutex);
885
886                 i915_vm_put(&ppgtt->vm);
887         }
888
889         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
890                 struct intel_timeline *timeline;
891
892                 timeline = intel_timeline_create(&i915->gt, NULL);
893                 if (IS_ERR(timeline)) {
894                         context_close(ctx);
895                         return ERR_CAST(timeline);
896                 }
897
898                 __assign_timeline(ctx, timeline);
899                 intel_timeline_put(timeline);
900         }
901
902         trace_i915_context_create(ctx);
903
904         return ctx;
905 }
906
907 static void init_contexts(struct i915_gem_contexts *gc)
908 {
909         spin_lock_init(&gc->lock);
910         INIT_LIST_HEAD(&gc->list);
911
912         INIT_WORK(&gc->free_work, contexts_free_worker);
913         init_llist_head(&gc->free_list);
914 }
915
916 void i915_gem_init__contexts(struct drm_i915_private *i915)
917 {
918         init_contexts(&i915->gem.contexts);
919         drm_dbg(&i915->drm, "%s context support initialized\n",
920                 DRIVER_CAPS(i915)->has_logical_contexts ?
921                 "logical" : "fake");
922 }
923
924 void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
925 {
926         flush_work(&i915->gem.contexts.free_work);
927         rcu_barrier(); /* and flush the left over RCU frees */
928 }
929
930 static int gem_context_register(struct i915_gem_context *ctx,
931                                 struct drm_i915_file_private *fpriv,
932                                 u32 *id)
933 {
934         struct i915_address_space *vm;
935         int ret;
936
937         ctx->file_priv = fpriv;
938
939         mutex_lock(&ctx->mutex);
940         vm = i915_gem_context_vm(ctx);
941         if (vm)
942                 WRITE_ONCE(vm->file, fpriv); /* XXX */
943         mutex_unlock(&ctx->mutex);
944
945         ctx->pid = get_task_pid(current, PIDTYPE_PID);
946         snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
947                  current->comm, pid_nr(ctx->pid));
948
949         /* And finally expose ourselves to userspace via the idr */
950         ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
951         if (ret)
952                 put_pid(fetch_and_zero(&ctx->pid));
953
954         return ret;
955 }
956
957 int i915_gem_context_open(struct drm_i915_private *i915,
958                           struct drm_file *file)
959 {
960         struct drm_i915_file_private *file_priv = file->driver_priv;
961         struct i915_gem_context *ctx;
962         int err;
963         u32 id;
964
965         xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
966
967         /* 0 reserved for invalid/unassigned ppgtt */
968         xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
969
970         ctx = i915_gem_create_context(i915, 0);
971         if (IS_ERR(ctx)) {
972                 err = PTR_ERR(ctx);
973                 goto err;
974         }
975
976         err = gem_context_register(ctx, file_priv, &id);
977         if (err < 0)
978                 goto err_ctx;
979
980         GEM_BUG_ON(id);
981         return 0;
982
983 err_ctx:
984         context_close(ctx);
985 err:
986         xa_destroy(&file_priv->vm_xa);
987         xa_destroy(&file_priv->context_xa);
988         return err;
989 }
990
991 void i915_gem_context_close(struct drm_file *file)
992 {
993         struct drm_i915_file_private *file_priv = file->driver_priv;
994         struct drm_i915_private *i915 = file_priv->dev_priv;
995         struct i915_address_space *vm;
996         struct i915_gem_context *ctx;
997         unsigned long idx;
998
999         xa_for_each(&file_priv->context_xa, idx, ctx)
1000                 context_close(ctx);
1001         xa_destroy(&file_priv->context_xa);
1002
1003         xa_for_each(&file_priv->vm_xa, idx, vm)
1004                 i915_vm_put(vm);
1005         xa_destroy(&file_priv->vm_xa);
1006
1007         contexts_flush_free(&i915->gem.contexts);
1008 }
1009
1010 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1011                              struct drm_file *file)
1012 {
1013         struct drm_i915_private *i915 = to_i915(dev);
1014         struct drm_i915_gem_vm_control *args = data;
1015         struct drm_i915_file_private *file_priv = file->driver_priv;
1016         struct i915_ppgtt *ppgtt;
1017         u32 id;
1018         int err;
1019
1020         if (!HAS_FULL_PPGTT(i915))
1021                 return -ENODEV;
1022
1023         if (args->flags)
1024                 return -EINVAL;
1025
1026         ppgtt = i915_ppgtt_create(&i915->gt);
1027         if (IS_ERR(ppgtt))
1028                 return PTR_ERR(ppgtt);
1029
1030         ppgtt->vm.file = file_priv;
1031
1032         if (args->extensions) {
1033                 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1034                                            NULL, 0,
1035                                            ppgtt);
1036                 if (err)
1037                         goto err_put;
1038         }
1039
1040         err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1041                        xa_limit_32b, GFP_KERNEL);
1042         if (err)
1043                 goto err_put;
1044
1045         GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1046         args->vm_id = id;
1047         return 0;
1048
1049 err_put:
1050         i915_vm_put(&ppgtt->vm);
1051         return err;
1052 }
1053
1054 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1055                               struct drm_file *file)
1056 {
1057         struct drm_i915_file_private *file_priv = file->driver_priv;
1058         struct drm_i915_gem_vm_control *args = data;
1059         struct i915_address_space *vm;
1060
1061         if (args->flags)
1062                 return -EINVAL;
1063
1064         if (args->extensions)
1065                 return -EINVAL;
1066
1067         vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1068         if (!vm)
1069                 return -ENOENT;
1070
1071         i915_vm_put(vm);
1072         return 0;
1073 }
1074
1075 struct context_barrier_task {
1076         struct i915_active base;
1077         void (*task)(void *data);
1078         void *data;
1079 };
1080
1081 __i915_active_call
1082 static void cb_retire(struct i915_active *base)
1083 {
1084         struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
1085
1086         if (cb->task)
1087                 cb->task(cb->data);
1088
1089         i915_active_fini(&cb->base);
1090         kfree(cb);
1091 }
1092
1093 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
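/*
 * Emit a (possibly empty) request on each of the context's engines selected
 * by @engines and invoke @task once all of those requests have retired.
 * Used to defer work, such as releasing the old ppgtt, until every
 * in-flight user of the previous state has drained.
 */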
1094 static int context_barrier_task(struct i915_gem_context *ctx,
1095                                 intel_engine_mask_t engines,
1096                                 bool (*skip)(struct intel_context *ce, void *data),
1097                                 int (*emit)(struct i915_request *rq, void *data),
1098                                 void (*task)(void *data),
1099                                 void *data)
1100 {
1101         struct context_barrier_task *cb;
1102         struct i915_gem_engines_iter it;
1103         struct i915_gem_engines *e;
1104         struct intel_context *ce;
1105         int err = 0;
1106
1107         GEM_BUG_ON(!task);
1108
1109         cb = kmalloc(sizeof(*cb), GFP_KERNEL);
1110         if (!cb)
1111                 return -ENOMEM;
1112
1113         i915_active_init(&cb->base, NULL, cb_retire);
1114         err = i915_active_acquire(&cb->base);
1115         if (err) {
1116                 kfree(cb);
1117                 return err;
1118         }
1119
1120         e = __context_engines_await(ctx);
1121         if (!e) {
1122                 i915_active_release(&cb->base);
1123                 return -ENOENT;
1124         }
1125
1126         for_each_gem_engine(ce, e, it) {
1127                 struct i915_request *rq;
1128
1129                 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
1130                                        ce->engine->mask)) {
1131                         err = -ENXIO;
1132                         break;
1133                 }
1134
1135                 if (!(ce->engine->mask & engines))
1136                         continue;
1137
1138                 if (skip && skip(ce, data))
1139                         continue;
1140
1141                 rq = intel_context_create_request(ce);
1142                 if (IS_ERR(rq)) {
1143                         err = PTR_ERR(rq);
1144                         break;
1145                 }
1146
1147                 err = 0;
1148                 if (emit)
1149                         err = emit(rq, data);
1150                 if (err == 0)
1151                         err = i915_active_add_request(&cb->base, rq);
1152
1153                 i915_request_add(rq);
1154                 if (err)
1155                         break;
1156         }
1157         i915_sw_fence_complete(&e->fence);
1158
1159         cb->task = err ? NULL : task; /* caller needs to unwind instead */
1160         cb->data = data;
1161
1162         i915_active_release(&cb->base);
1163
1164         return err;
1165 }
1166
1167 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1168                      struct i915_gem_context *ctx,
1169                      struct drm_i915_gem_context_param *args)
1170 {
1171         struct i915_address_space *vm;
1172         int err;
1173         u32 id;
1174
1175         if (!rcu_access_pointer(ctx->vm))
1176                 return -ENODEV;
1177
1178         rcu_read_lock();
1179         vm = context_get_vm_rcu(ctx);
1180         rcu_read_unlock();
1181         if (!vm)
1182                 return -ENODEV;
1183
1184         err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1185         if (err)
1186                 goto err_put;
1187
1188         i915_vm_open(vm);
1189
1190         GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1191         args->value = id;
1192         args->size = 0;
1193
1194 err_put:
1195         i915_vm_put(vm);
1196         return err;
1197 }
1198
1199 static void set_ppgtt_barrier(void *data)
1200 {
1201         struct i915_address_space *old = data;
1202
1203         if (INTEL_GEN(old->i915) < 8)
1204                 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
1205
1206         i915_vm_close(old);
1207 }
1208
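/*
 * Rewrite the ring's page-directory pointer registers to point at the new
 * ppgtt: a single PDP entry for a 4-level ppgtt, or all GEN8_3LVL_PDPES
 * entries for the 3-level layout on execlists platforms.
 */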
1209 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1210 {
1211         struct i915_address_space *vm = rq->context->vm;
1212         struct intel_engine_cs *engine = rq->engine;
1213         u32 base = engine->mmio_base;
1214         u32 *cs;
1215         int i;
1216
1217         if (i915_vm_is_4lvl(vm)) {
1218                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1219                 const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1220
1221                 cs = intel_ring_begin(rq, 6);
1222                 if (IS_ERR(cs))
1223                         return PTR_ERR(cs);
1224
1225                 *cs++ = MI_LOAD_REGISTER_IMM(2);
1226
1227                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1228                 *cs++ = upper_32_bits(pd_daddr);
1229                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1230                 *cs++ = lower_32_bits(pd_daddr);
1231
1232                 *cs++ = MI_NOOP;
1233                 intel_ring_advance(rq, cs);
1234         } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1235                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1236                 int err;
1237
1238                 /* Magic required to prevent forcewake errors! */
1239                 err = engine->emit_flush(rq, EMIT_INVALIDATE);
1240                 if (err)
1241                         return err;
1242
1243                 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1244                 if (IS_ERR(cs))
1245                         return PTR_ERR(cs);
1246
1247                 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1248                 for (i = GEN8_3LVL_PDPES; i--; ) {
1249                         const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1250
1251                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1252                         *cs++ = upper_32_bits(pd_daddr);
1253                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1254                         *cs++ = lower_32_bits(pd_daddr);
1255                 }
1256                 *cs++ = MI_NOOP;
1257                 intel_ring_advance(rq, cs);
1258         }
1259
1260         return 0;
1261 }
1262
1263 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1264 {
1265         if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
1266                 return true;
1267
1268         if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1269                 return false;
1270
1271         if (!atomic_read(&ce->pin_count))
1272                 return true;
1273
1274         /* ppGTT is not part of the legacy context image */
1275         if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
1276                 return true;
1277
1278         return false;
1279 }
1280
1281 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1282                      struct i915_gem_context *ctx,
1283                      struct drm_i915_gem_context_param *args)
1284 {
1285         struct i915_address_space *vm, *old;
1286         int err;
1287
1288         if (args->size)
1289                 return -EINVAL;
1290
1291         if (!rcu_access_pointer(ctx->vm))
1292                 return -ENODEV;
1293
1294         if (upper_32_bits(args->value))
1295                 return -ENOENT;
1296
1297         rcu_read_lock();
1298         vm = xa_load(&file_priv->vm_xa, args->value);
1299         if (vm && !kref_get_unless_zero(&vm->ref))
1300                 vm = NULL;
1301         rcu_read_unlock();
1302         if (!vm)
1303                 return -ENOENT;
1304
1305         err = mutex_lock_interruptible(&ctx->mutex);
1306         if (err)
1307                 goto out;
1308
1309         if (i915_gem_context_is_closed(ctx)) {
1310                 err = -ENOENT;
1311                 goto unlock;
1312         }
1313
1314         if (vm == rcu_access_pointer(ctx->vm))
1315                 goto unlock;
1316
1317         /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1318         lut_close(ctx);
1319
1320         old = __set_ppgtt(ctx, vm);
1321
1322         /*
1323          * We need to flush any requests using the current ppgtt before
1324          * we release it as the requests do not hold a reference themselves,
1325          * only indirectly through the context.
1326          */
1327         err = context_barrier_task(ctx, ALL_ENGINES,
1328                                    skip_ppgtt_update,
1329                                    emit_ppgtt_update,
1330                                    set_ppgtt_barrier,
1331                                    old);
1332         if (err) {
1333                 i915_vm_close(__set_ppgtt(ctx, old));
1334                 i915_vm_close(old);
1335         }
1336
1337 unlock:
1338         mutex_unlock(&ctx->mutex);
1339 out:
1340         i915_vm_put(vm);
1341         return err;
1342 }
1343
1344 static int __apply_ringsize(struct intel_context *ce, void *sz)
1345 {
1346         return intel_context_set_ring_size(ce, (unsigned long)sz);
1347 }
1348
1349 static int set_ringsize(struct i915_gem_context *ctx,
1350                         struct drm_i915_gem_context_param *args)
1351 {
1352         if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
1353                 return -ENODEV;
1354
1355         if (args->size)
1356                 return -EINVAL;
1357
1358         if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
1359                 return -EINVAL;
1360
1361         if (args->value < I915_GTT_PAGE_SIZE)
1362                 return -EINVAL;
1363
1364         if (args->value > 128 * I915_GTT_PAGE_SIZE)
1365                 return -EINVAL;
1366
1367         return context_apply_all(ctx,
1368                                  __apply_ringsize,
1369                                  __intel_context_ring_size(args->value));
1370 }
1371
1372 static int __get_ringsize(struct intel_context *ce, void *arg)
1373 {
1374         long sz;
1375
1376         sz = intel_context_get_ring_size(ce);
1377         GEM_BUG_ON(sz > INT_MAX);
1378
1379         return sz; /* stop on first engine */
1380 }
1381
1382 static int get_ringsize(struct i915_gem_context *ctx,
1383                         struct drm_i915_gem_context_param *args)
1384 {
1385         int sz;
1386
1387         if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
1388                 return -ENODEV;
1389
1390         if (args->size)
1391                 return -EINVAL;
1392
1393         sz = context_apply_all(ctx, __get_ringsize, NULL);
1394         if (sz < 0)
1395                 return sz;
1396
1397         args->value = sz;
1398         return 0;
1399 }
1400
1401 int
1402 i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
1403                               const struct drm_i915_gem_context_param_sseu *user,
1404                               struct intel_sseu *context)
1405 {
1406         const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1407
1408         /* No zeros in any field. */
1409         if (!user->slice_mask || !user->subslice_mask ||
1410             !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1411                 return -EINVAL;
1412
1413         /* Max > min. */
1414         if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1415                 return -EINVAL;
1416
1417         /*
1418          * Some future proofing on the types since the uAPI is wider than the
1419          * current internal implementation.
1420          */
1421         if (overflows_type(user->slice_mask, context->slice_mask) ||
1422             overflows_type(user->subslice_mask, context->subslice_mask) ||
1423             overflows_type(user->min_eus_per_subslice,
1424                            context->min_eus_per_subslice) ||
1425             overflows_type(user->max_eus_per_subslice,
1426                            context->max_eus_per_subslice))
1427                 return -EINVAL;
1428
1429         /* Check validity against hardware. */
1430         if (user->slice_mask & ~device->slice_mask)
1431                 return -EINVAL;
1432
1433         if (user->subslice_mask & ~device->subslice_mask[0])
1434                 return -EINVAL;
1435
1436         if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1437                 return -EINVAL;
1438
1439         context->slice_mask = user->slice_mask;
1440         context->subslice_mask = user->subslice_mask;
1441         context->min_eus_per_subslice = user->min_eus_per_subslice;
1442         context->max_eus_per_subslice = user->max_eus_per_subslice;
1443
1444         /* Part specific restrictions. */
1445         if (IS_GEN(i915, 11)) {
1446                 unsigned int hw_s = hweight8(device->slice_mask);
1447                 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1448                 unsigned int req_s = hweight8(context->slice_mask);
1449                 unsigned int req_ss = hweight8(context->subslice_mask);
1450
1451                 /*
1452                  * Only full subslice enablement is possible if more than one
1453                  * slice is turned on.
1454                  */
1455                 if (req_s > 1 && req_ss != hw_ss_per_s)
1456                         return -EINVAL;
1457
1458                 /*
1459                  * If more than four (SScount bitfield limit) subslices are
1460                  * requested then the number has to be even.
1461                  */
1462                 if (req_ss > 4 && (req_ss & 1))
1463                         return -EINVAL;
1464
1465                 /*
1466                  * If only one slice is enabled and subslice count is below the
1467                  * device full enablement, it must be at most half of the all
1468                  * available subslices.
1469                  */
1470                 if (req_s == 1 && req_ss < hw_ss_per_s &&
1471                     req_ss > (hw_ss_per_s / 2))
1472                         return -EINVAL;
1473
1474                 /* ABI restriction - VME use case only. */
1475
1476                 /* All slices or one slice only. */
1477                 if (req_s != 1 && req_s != hw_s)
1478                         return -EINVAL;
1479
1480                 /*
1481                  * Half subslices or full enablement only when one slice is
1482                  * enabled.
1483                  */
1484                 if (req_s == 1 &&
1485                     (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1486                         return -EINVAL;
1487
1488                 /* No EU configuration changes. */
1489                 if ((user->min_eus_per_subslice !=
1490                      device->max_eus_per_subslice) ||
1491                     (user->max_eus_per_subslice !=
1492                      device->max_eus_per_subslice))
1493                         return -EINVAL;
1494         }
1495
1496         return 0;
1497 }
1498
1499 static int set_sseu(struct i915_gem_context *ctx,
1500                     struct drm_i915_gem_context_param *args)
1501 {
1502         struct drm_i915_private *i915 = ctx->i915;
1503         struct drm_i915_gem_context_param_sseu user_sseu;
1504         struct intel_context *ce;
1505         struct intel_sseu sseu;
1506         unsigned long lookup;
1507         int ret;
1508
1509         if (args->size < sizeof(user_sseu))
1510                 return -EINVAL;
1511
1512         if (!IS_GEN(i915, 11))
1513                 return -ENODEV;
1514
1515         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1516                            sizeof(user_sseu)))
1517                 return -EFAULT;
1518
1519         if (user_sseu.rsvd)
1520                 return -EINVAL;
1521
1522         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1523                 return -EINVAL;
1524
1525         lookup = 0;
1526         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1527                 lookup |= LOOKUP_USER_INDEX;
1528
1529         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1530         if (IS_ERR(ce))
1531                 return PTR_ERR(ce);
1532
1533         /* Only render engine supports RPCS configuration. */
1534         if (ce->engine->class != RENDER_CLASS) {
1535                 ret = -ENODEV;
1536                 goto out_ce;
1537         }
1538
1539         ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu);
1540         if (ret)
1541                 goto out_ce;
1542
1543         ret = intel_context_reconfigure_sseu(ce, sseu);
1544         if (ret)
1545                 goto out_ce;
1546
1547         args->size = sizeof(user_sseu);
1548
1549 out_ce:
1550         intel_context_put(ce);
1551         return ret;
1552 }
1553
1554 struct set_engines {
1555         struct i915_gem_context *ctx;
1556         struct i915_gem_engines *engines;
1557 };
1558
1559 static int
1560 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1561 {
1562         struct i915_context_engines_load_balance __user *ext =
1563                 container_of_user(base, typeof(*ext), base);
1564         const struct set_engines *set = data;
1565         struct drm_i915_private *i915 = set->ctx->i915;
1566         struct intel_engine_cs *stack[16];
1567         struct intel_engine_cs **siblings;
1568         struct intel_context *ce;
1569         u16 num_siblings, idx;
1570         unsigned int n;
1571         int err;
1572
1573         if (!HAS_EXECLISTS(i915))
1574                 return -ENODEV;
1575
1576         if (intel_uc_uses_guc_submission(&i915->gt.uc))
1577                 return -ENODEV; /* not implemented yet */
1578
1579         if (get_user(idx, &ext->engine_index))
1580                 return -EFAULT;
1581
1582         if (idx >= set->engines->num_engines) {
1583                 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
1584                         idx, set->engines->num_engines);
1585                 return -EINVAL;
1586         }
1587
1588         idx = array_index_nospec(idx, set->engines->num_engines);
1589         if (set->engines->engines[idx]) {
1590                 drm_dbg(&i915->drm,
1591                         "Invalid placement[%d], already occupied\n", idx);
1592                 return -EEXIST;
1593         }
1594
1595         if (get_user(num_siblings, &ext->num_siblings))
1596                 return -EFAULT;
1597
1598         err = check_user_mbz(&ext->flags);
1599         if (err)
1600                 return err;
1601
1602         err = check_user_mbz(&ext->mbz64);
1603         if (err)
1604                 return err;
1605
1606         siblings = stack;
1607         if (num_siblings > ARRAY_SIZE(stack)) {
1608                 siblings = kmalloc_array(num_siblings,
1609                                          sizeof(*siblings),
1610                                          GFP_KERNEL);
1611                 if (!siblings)
1612                         return -ENOMEM;
1613         }
1614
1615         for (n = 0; n < num_siblings; n++) {
1616                 struct i915_engine_class_instance ci;
1617
1618                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1619                         err = -EFAULT;
1620                         goto out_siblings;
1621                 }
1622
1623                 siblings[n] = intel_engine_lookup_user(i915,
1624                                                        ci.engine_class,
1625                                                        ci.engine_instance);
1626                 if (!siblings[n]) {
1627                         drm_dbg(&i915->drm,
1628                                 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
1629                                 n, ci.engine_class, ci.engine_instance);
1630                         err = -EINVAL;
1631                         goto out_siblings;
1632                 }
1633         }
1634
1635         ce = intel_execlists_create_virtual(siblings, n);
1636         if (IS_ERR(ce)) {
1637                 err = PTR_ERR(ce);
1638                 goto out_siblings;
1639         }
1640
1641         intel_context_set_gem(ce, set->ctx);
1642
1643         if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1644                 intel_context_put(ce);
1645                 err = -EEXIST;
1646                 goto out_siblings;
1647         }
1648
1649 out_siblings:
1650         if (siblings != stack)
1651                 kfree(siblings);
1652
1653         return err;
1654 }
1655
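/*
 * I915_CONTEXT_ENGINES_EXT_BOND: for the virtual engine at virtual_index,
 * record which physical engines may service a request whose submit fence
 * originates from the named master engine. Bonds are skipped for a
 * non-virtual slot, since there are no siblings to choose between.
 */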
1656 static int
1657 set_engines__bond(struct i915_user_extension __user *base, void *data)
1658 {
1659         struct i915_context_engines_bond __user *ext =
1660                 container_of_user(base, typeof(*ext), base);
1661         const struct set_engines *set = data;
1662         struct drm_i915_private *i915 = set->ctx->i915;
1663         struct i915_engine_class_instance ci;
1664         struct intel_engine_cs *virtual;
1665         struct intel_engine_cs *master;
1666         u16 idx, num_bonds;
1667         int err, n;
1668
1669         if (get_user(idx, &ext->virtual_index))
1670                 return -EFAULT;
1671
1672         if (idx >= set->engines->num_engines) {
1673                 drm_dbg(&i915->drm,
1674                         "Invalid index for virtual engine: %d >= %d\n",
1675                         idx, set->engines->num_engines);
1676                 return -EINVAL;
1677         }
1678
1679         idx = array_index_nospec(idx, set->engines->num_engines);
1680         if (!set->engines->engines[idx]) {
1681                 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
1682                 return -EINVAL;
1683         }
1684         virtual = set->engines->engines[idx]->engine;
1685
1686         err = check_user_mbz(&ext->flags);
1687         if (err)
1688                 return err;
1689
1690         for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1691                 err = check_user_mbz(&ext->mbz64[n]);
1692                 if (err)
1693                         return err;
1694         }
1695
1696         if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1697                 return -EFAULT;
1698
1699         master = intel_engine_lookup_user(i915,
1700                                           ci.engine_class, ci.engine_instance);
1701         if (!master) {
1702                 drm_dbg(&i915->drm,
1703                         "Unrecognised master engine: { class:%u, instance:%u }\n",
1704                         ci.engine_class, ci.engine_instance);
1705                 return -EINVAL;
1706         }
1707
1708         if (get_user(num_bonds, &ext->num_bonds))
1709                 return -EFAULT;
1710
1711         for (n = 0; n < num_bonds; n++) {
1712                 struct intel_engine_cs *bond;
1713
1714                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1715                         return -EFAULT;
1716
1717                 bond = intel_engine_lookup_user(i915,
1718                                                 ci.engine_class,
1719                                                 ci.engine_instance);
1720                 if (!bond) {
1721                         drm_dbg(&i915->drm,
1722                                 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1723                                 n, ci.engine_class, ci.engine_instance);
1724                         return -EINVAL;
1725                 }
1726
1727                 /*
1728                  * A non-virtual engine has no siblings to choose between, so a
1729                  * submit fence will always be directed to that one engine.
1730                  */
1731                 if (intel_engine_is_virtual(virtual)) {
1732                         err = intel_virtual_engine_attach_bond(virtual,
1733                                                                master,
1734                                                                bond);
1735                         if (err)
1736                                 return err;
1737                 }
1738         }
1739
1740         return 0;
1741 }
1742
1743 static const i915_user_extension_fn set_engines__extensions[] = {
1744         [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1745         [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1746 };
1747
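/*
 * I915_CONTEXT_PARAM_ENGINES: replace the context's engine map with the
 * user-supplied array of { class, instance } pairs. An { INVALID, NONE }
 * pair leaves a hole in the map, and a zero-sized argument restores the
 * legacy per-ring map. Once the new map is populated, the LOAD_BALANCE and
 * BOND extensions are applied before the map is published.
 */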
1748 static int
1749 set_engines(struct i915_gem_context *ctx,
1750             const struct drm_i915_gem_context_param *args)
1751 {
1752         struct drm_i915_private *i915 = ctx->i915;
1753         struct i915_context_param_engines __user *user =
1754                 u64_to_user_ptr(args->value);
1755         struct set_engines set = { .ctx = ctx };
1756         unsigned int num_engines, n;
1757         u64 extensions;
1758         int err;
1759
1760         if (!args->size) { /* switch back to legacy user_ring_map */
1761                 if (!i915_gem_context_user_engines(ctx))
1762                         return 0;
1763
1764                 set.engines = default_engines(ctx);
1765                 if (IS_ERR(set.engines))
1766                         return PTR_ERR(set.engines);
1767
1768                 goto replace;
1769         }
1770
1771         BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1772         if (args->size < sizeof(*user) ||
1773             !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1774                 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
1775                         args->size);
1776                 return -EINVAL;
1777         }
1778
1779         /*
1780          * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1781          * first 64 engines defined here.
1782          */
1783         num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1784         set.engines = alloc_engines(num_engines);
1785         if (!set.engines)
1786                 return -ENOMEM;
1787
1788         for (n = 0; n < num_engines; n++) {
1789                 struct i915_engine_class_instance ci;
1790                 struct intel_engine_cs *engine;
1791                 struct intel_context *ce;
1792
1793                 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1794                         __free_engines(set.engines, n);
1795                         return -EFAULT;
1796                 }
1797
1798                 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1799                     ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1800                         set.engines->engines[n] = NULL;
1801                         continue;
1802                 }
1803
1804                 engine = intel_engine_lookup_user(ctx->i915,
1805                                                   ci.engine_class,
1806                                                   ci.engine_instance);
1807                 if (!engine) {
1808                         drm_dbg(&i915->drm,
1809                                 "Invalid engine[%d]: { class:%d, instance:%d }\n",
1810                                 n, ci.engine_class, ci.engine_instance);
1811                         __free_engines(set.engines, n);
1812                         return -ENOENT;
1813                 }
1814
1815                 ce = intel_context_create(engine);
1816                 if (IS_ERR(ce)) {
1817                         __free_engines(set.engines, n);
1818                         return PTR_ERR(ce);
1819                 }
1820
1821                 intel_context_set_gem(ce, ctx);
1822
1823                 set.engines->engines[n] = ce;
1824         }
1825         set.engines->num_engines = num_engines;
1826
1827         err = -EFAULT;
1828         if (!get_user(extensions, &user->extensions))
1829                 err = i915_user_extensions(u64_to_user_ptr(extensions),
1830                                            set_engines__extensions,
1831                                            ARRAY_SIZE(set_engines__extensions),
1832                                            &set);
1833         if (err) {
1834                 free_engines(set.engines);
1835                 return err;
1836         }
1837
1838 replace:
1839         mutex_lock(&ctx->engines_mutex);
1840         if (i915_gem_context_is_closed(ctx)) {
1841                 mutex_unlock(&ctx->engines_mutex);
1842                 free_engines(set.engines);
1843                 return -ENOENT;
1844         }
1845         if (args->size)
1846                 i915_gem_context_set_user_engines(ctx);
1847         else
1848                 i915_gem_context_clear_user_engines(ctx);
1849         set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
1850         mutex_unlock(&ctx->engines_mutex);
1851
1852         /* Keep track of old engine sets for kill_context() */
1853         engines_idle_release(ctx, set.engines);
1854
1855         return 0;
1856 }
1857
1858 static struct i915_gem_engines *
1859 __copy_engines(struct i915_gem_engines *e)
1860 {
1861         struct i915_gem_engines *copy;
1862         unsigned int n;
1863
1864         copy = alloc_engines(e->num_engines);
1865         if (!copy)
1866                 return ERR_PTR(-ENOMEM);
1867
1868         for (n = 0; n < e->num_engines; n++) {
1869                 if (e->engines[n])
1870                         copy->engines[n] = intel_context_get(e->engines[n]);
1871                 else
1872                         copy->engines[n] = NULL;
1873         }
1874         copy->num_engines = n;
1875
1876         return copy;
1877 }
1878
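/*
 * Read side of I915_CONTEXT_PARAM_ENGINES: report the user-defined engine
 * map back in the uAPI layout. With args->size == 0 only the required
 * buffer size is returned; unused slots are reported as
 * { I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE }.
 */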
1879 static int
1880 get_engines(struct i915_gem_context *ctx,
1881             struct drm_i915_gem_context_param *args)
1882 {
1883         struct i915_context_param_engines __user *user;
1884         struct i915_gem_engines *e;
1885         size_t n, count, size;
1886         int err = 0;
1887
1888         err = mutex_lock_interruptible(&ctx->engines_mutex);
1889         if (err)
1890                 return err;
1891
1892         e = NULL;
1893         if (i915_gem_context_user_engines(ctx))
1894                 e = __copy_engines(i915_gem_context_engines(ctx));
1895         mutex_unlock(&ctx->engines_mutex);
1896         if (IS_ERR_OR_NULL(e)) {
1897                 args->size = 0;
1898                 return PTR_ERR_OR_ZERO(e);
1899         }
1900
1901         count = e->num_engines;
1902
1903         /* Be paranoid in case we have an impedance mismatch */
1904         if (!check_struct_size(user, engines, count, &size)) {
1905                 err = -EINVAL;
1906                 goto err_free;
1907         }
1908         if (overflows_type(size, args->size)) {
1909                 err = -EINVAL;
1910                 goto err_free;
1911         }
1912
1913         if (!args->size) {
1914                 args->size = size;
1915                 goto err_free;
1916         }
1917
1918         if (args->size < size) {
1919                 err = -EINVAL;
1920                 goto err_free;
1921         }
1922
1923         user = u64_to_user_ptr(args->value);
1924         if (put_user(0, &user->extensions)) {
1925                 err = -EFAULT;
1926                 goto err_free;
1927         }
1928
1929         for (n = 0; n < count; n++) {
1930                 struct i915_engine_class_instance ci = {
1931                         .engine_class = I915_ENGINE_CLASS_INVALID,
1932                         .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1933                 };
1934
1935                 if (e->engines[n]) {
1936                         ci.engine_class = e->engines[n]->engine->uabi_class;
1937                         ci.engine_instance = e->engines[n]->engine->uabi_instance;
1938                 }
1939
1940                 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1941                         err = -EFAULT;
1942                         goto err_free;
1943                 }
1944         }
1945
1946         args->size = size;
1947
1948 err_free:
1949         free_engines(e);
1950         return err;
1951 }
1952
1953 static int
1954 set_persistence(struct i915_gem_context *ctx,
1955                 const struct drm_i915_gem_context_param *args)
1956 {
1957         if (args->size)
1958                 return -EINVAL;
1959
1960         return __context_set_persistence(ctx, args->value);
1961 }
1962
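/*
 * Context priority also gates semaphore usage: on engines that support
 * timeslicing, contexts at or above normal priority may use semaphores,
 * while lower-priority contexts may not.
 */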
1963 static int __apply_priority(struct intel_context *ce, void *arg)
1964 {
1965         struct i915_gem_context *ctx = arg;
1966
1967         if (!intel_engine_has_timeslices(ce->engine))
1968                 return 0;
1969
1970         if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
1971                 intel_context_set_use_semaphores(ce);
1972         else
1973                 intel_context_clear_use_semaphores(ce);
1974
1975         return 0;
1976 }
1977
1978 static int set_priority(struct i915_gem_context *ctx,
1979                         const struct drm_i915_gem_context_param *args)
1980 {
1981         s64 priority = args->value;
1982
1983         if (args->size)
1984                 return -EINVAL;
1985
1986         if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1987                 return -ENODEV;
1988
1989         if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1990             priority < I915_CONTEXT_MIN_USER_PRIORITY)
1991                 return -EINVAL;
1992
1993         if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1994             !capable(CAP_SYS_NICE))
1995                 return -EPERM;
1996
1997         ctx->sched.priority = I915_USER_PRIORITY(priority);
1998         context_apply_all(ctx, __apply_priority, ctx);
1999
2000         return 0;
2001 }
2002
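/*
 * ctx_setparam() is the common dispatcher for context parameters, shared
 * by the SETPARAM ioctl and the CREATE_EXT_SETPARAM creation extension.
 */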
2003 static int ctx_setparam(struct drm_i915_file_private *fpriv,
2004                         struct i915_gem_context *ctx,
2005                         struct drm_i915_gem_context_param *args)
2006 {
2007         int ret = 0;
2008
2009         switch (args->param) {
2010         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2011                 if (args->size)
2012                         ret = -EINVAL;
2013                 else if (args->value)
2014                         set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2015                 else
2016                         clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2017                 break;
2018
2019         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2020                 if (args->size)
2021                         ret = -EINVAL;
2022                 else if (args->value)
2023                         i915_gem_context_set_no_error_capture(ctx);
2024                 else
2025                         i915_gem_context_clear_no_error_capture(ctx);
2026                 break;
2027
2028         case I915_CONTEXT_PARAM_BANNABLE:
2029                 if (args->size)
2030                         ret = -EINVAL;
2031                 else if (!capable(CAP_SYS_ADMIN) && !args->value)
2032                         ret = -EPERM;
2033                 else if (args->value)
2034                         i915_gem_context_set_bannable(ctx);
2035                 else
2036                         i915_gem_context_clear_bannable(ctx);
2037                 break;
2038
2039         case I915_CONTEXT_PARAM_RECOVERABLE:
2040                 if (args->size)
2041                         ret = -EINVAL;
2042                 else if (args->value)
2043                         i915_gem_context_set_recoverable(ctx);
2044                 else
2045                         i915_gem_context_clear_recoverable(ctx);
2046                 break;
2047
2048         case I915_CONTEXT_PARAM_PRIORITY:
2049                 ret = set_priority(ctx, args);
2050                 break;
2051
2052         case I915_CONTEXT_PARAM_SSEU:
2053                 ret = set_sseu(ctx, args);
2054                 break;
2055
2056         case I915_CONTEXT_PARAM_VM:
2057                 ret = set_ppgtt(fpriv, ctx, args);
2058                 break;
2059
2060         case I915_CONTEXT_PARAM_ENGINES:
2061                 ret = set_engines(ctx, args);
2062                 break;
2063
2064         case I915_CONTEXT_PARAM_PERSISTENCE:
2065                 ret = set_persistence(ctx, args);
2066                 break;
2067
2068         case I915_CONTEXT_PARAM_RINGSIZE:
2069                 ret = set_ringsize(ctx, args);
2070                 break;
2071
2072         case I915_CONTEXT_PARAM_BAN_PERIOD:
2073         default:
2074                 ret = -EINVAL;
2075                 break;
2076         }
2077
2078         return ret;
2079 }
2080
2081 struct create_ext {
2082         struct i915_gem_context *ctx;
2083         struct drm_i915_file_private *fpriv;
2084 };
2085
2086 static int create_setparam(struct i915_user_extension __user *ext, void *data)
2087 {
2088         struct drm_i915_gem_context_create_ext_setparam local;
2089         const struct create_ext *arg = data;
2090
2091         if (copy_from_user(&local, ext, sizeof(local)))
2092                 return -EFAULT;
2093
2094         if (local.param.ctx_id)
2095                 return -EINVAL;
2096
2097         return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
2098 }
2099
2100 static int copy_ring_size(struct intel_context *dst,
2101                           struct intel_context *src)
2102 {
2103         long sz;
2104
2105         sz = intel_context_get_ring_size(src);
2106         if (sz < 0)
2107                 return sz;
2108
2109         return intel_context_set_ring_size(dst, sz);
2110 }
2111
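/*
 * I915_CONTEXT_CLONE_ENGINES: duplicate the source context's engine map.
 * Each populated slot gets a fresh intel_context (virtual engines are
 * copied rather than shared) and inherits the preferred ring size.
 */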
2112 static int clone_engines(struct i915_gem_context *dst,
2113                          struct i915_gem_context *src)
2114 {
2115         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
2116         struct i915_gem_engines *clone;
2117         bool user_engines;
2118         unsigned long n;
2119
2120         clone = alloc_engines(e->num_engines);
2121         if (!clone)
2122                 goto err_unlock;
2123
2124         for (n = 0; n < e->num_engines; n++) {
2125                 struct intel_engine_cs *engine;
2126
2127                 if (!e->engines[n]) {
2128                         clone->engines[n] = NULL;
2129                         continue;
2130                 }
2131                 engine = e->engines[n]->engine;
2132
2133                 /*
2134                  * Virtual engines are singletons; they can only exist
2135                  * inside a single context, because they embed their
2136                  * HW context... As each virtual context implies a single
2137                  * timeline (each engine can only dequeue a single request
2138                  * at a time), it would be surprising for two contexts
2139                  * to use the same engine. So let's create a copy of
2140                  * the virtual engine instead.
2141                  */
2142                 if (intel_engine_is_virtual(engine))
2143                         clone->engines[n] =
2144                                 intel_execlists_clone_virtual(engine);
2145                 else
2146                         clone->engines[n] = intel_context_create(engine);
2147                 if (IS_ERR_OR_NULL(clone->engines[n])) {
2148                         __free_engines(clone, n);
2149                         goto err_unlock;
2150                 }
2151
2152                 intel_context_set_gem(clone->engines[n], dst);
2153
2154                 /* Copy across the preferred ringsize */
2155                 if (copy_ring_size(clone->engines[n], e->engines[n])) {
2156                         __free_engines(clone, n + 1);
2157                         goto err_unlock;
2158                 }
2159         }
2160         clone->num_engines = n;
2161
2162         user_engines = i915_gem_context_user_engines(src);
2163         i915_gem_context_unlock_engines(src);
2164
2165         /* Serialised by constructor */
2166         engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
2167         if (user_engines)
2168                 i915_gem_context_set_user_engines(dst);
2169         else
2170                 i915_gem_context_clear_user_engines(dst);
2171         return 0;
2172
2173 err_unlock:
2174         i915_gem_context_unlock_engines(src);
2175         return -ENOMEM;
2176 }
2177
2178 static int clone_flags(struct i915_gem_context *dst,
2179                        struct i915_gem_context *src)
2180 {
2181         dst->user_flags = src->user_flags;
2182         return 0;
2183 }
2184
2185 static int clone_schedattr(struct i915_gem_context *dst,
2186                            struct i915_gem_context *src)
2187 {
2188         dst->sched = src->sched;
2189         return 0;
2190 }
2191
2192 static int clone_sseu(struct i915_gem_context *dst,
2193                       struct i915_gem_context *src)
2194 {
2195         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
2196         struct i915_gem_engines *clone;
2197         unsigned long n;
2198         int err;
2199
2200         /* no locking required; sole access under constructor */
2201         clone = __context_engines_static(dst);
2202         if (e->num_engines != clone->num_engines) {
2203                 err = -EINVAL;
2204                 goto unlock;
2205         }
2206
2207         for (n = 0; n < e->num_engines; n++) {
2208                 struct intel_context *ce = e->engines[n];
2209
2210                 if (clone->engines[n]->engine->class != ce->engine->class) {
2211                         /* Must have compatible engine maps! */
2212                         err = -EINVAL;
2213                         goto unlock;
2214                 }
2215
2216                 /* serialises with set_sseu */
2217                 err = intel_context_lock_pinned(ce);
2218                 if (err)
2219                         goto unlock;
2220
2221                 clone->engines[n]->sseu = ce->sseu;
2222                 intel_context_unlock_pinned(ce);
2223         }
2224
2225         err = 0;
2226 unlock:
2227         i915_gem_context_unlock_engines(src);
2228         return err;
2229 }
2230
2231 static int clone_timeline(struct i915_gem_context *dst,
2232                           struct i915_gem_context *src)
2233 {
2234         if (src->timeline)
2235                 __assign_timeline(dst, src->timeline);
2236
2237         return 0;
2238 }
2239
2240 static int clone_vm(struct i915_gem_context *dst,
2241                     struct i915_gem_context *src)
2242 {
2243         struct i915_address_space *vm;
2244         int err = 0;
2245
2246         if (!rcu_access_pointer(src->vm))
2247                 return 0;
2248
2249         rcu_read_lock();
2250         vm = context_get_vm_rcu(src);
2251         rcu_read_unlock();
2252
2253         if (!mutex_lock_interruptible(&dst->mutex)) {
2254                 __assign_ppgtt(dst, vm);
2255                 mutex_unlock(&dst->mutex);
2256         } else {
2257                 err = -EINTR;
2258         }
2259
2260         i915_vm_put(vm);
2261         return err;
2262 }
2263
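/*
 * I915_CONTEXT_CREATE_EXT_CLONE: each bit set in the clone flags selects
 * one clone_*() helper, copying that aspect (engines, flags, schedattr,
 * sseu, timeline, vm) from the source context into the newly created one.
 */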
2264 static int create_clone(struct i915_user_extension __user *ext, void *data)
2265 {
2266         static int (* const fn[])(struct i915_gem_context *dst,
2267                                   struct i915_gem_context *src) = {
2268 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2269                 MAP(ENGINES, clone_engines),
2270                 MAP(FLAGS, clone_flags),
2271                 MAP(SCHEDATTR, clone_schedattr),
2272                 MAP(SSEU, clone_sseu),
2273                 MAP(TIMELINE, clone_timeline),
2274                 MAP(VM, clone_vm),
2275 #undef MAP
2276         };
2277         struct drm_i915_gem_context_create_ext_clone local;
2278         const struct create_ext *arg = data;
2279         struct i915_gem_context *dst = arg->ctx;
2280         struct i915_gem_context *src;
2281         int err, bit;
2282
2283         if (copy_from_user(&local, ext, sizeof(local)))
2284                 return -EFAULT;
2285
2286         BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2287                      I915_CONTEXT_CLONE_UNKNOWN);
2288
2289         if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2290                 return -EINVAL;
2291
2292         if (local.rsvd)
2293                 return -EINVAL;
2294
2295         rcu_read_lock();
2296         src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2297         rcu_read_unlock();
2298         if (!src)
2299                 return -ENOENT;
2300
2301         GEM_BUG_ON(src == dst);
2302
2303         for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2304                 if (!(local.flags & BIT(bit)))
2305                         continue;
2306
2307                 err = fn[bit](dst, src);
2308                 if (err)
2309                         return err;
2310         }
2311
2312         return 0;
2313 }
2314
2315 static const i915_user_extension_fn create_extensions[] = {
2316         [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2317         [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2318 };
2319
2320 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2321 {
2322         return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2323 }
2324
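/*
 * The context-create ioctl: reject clients that have been banned from
 * creating contexts, create the context, run any SETPARAM/CLONE creation
 * extensions, and only then register the context and return its id to
 * userspace.
 */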
2325 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2326                                   struct drm_file *file)
2327 {
2328         struct drm_i915_private *i915 = to_i915(dev);
2329         struct drm_i915_gem_context_create_ext *args = data;
2330         struct create_ext ext_data;
2331         int ret;
2332         u32 id;
2333
2334         if (!DRIVER_CAPS(i915)->has_logical_contexts)
2335                 return -ENODEV;
2336
2337         if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2338                 return -EINVAL;
2339
2340         ret = intel_gt_terminally_wedged(&i915->gt);
2341         if (ret)
2342                 return ret;
2343
2344         ext_data.fpriv = file->driver_priv;
2345         if (client_is_banned(ext_data.fpriv)) {
2346                 drm_dbg(&i915->drm,
2347                         "client %s[%d] banned from creating ctx\n",
2348                         current->comm, task_pid_nr(current));
2349                 return -EIO;
2350         }
2351
2352         ext_data.ctx = i915_gem_create_context(i915, args->flags);
2353         if (IS_ERR(ext_data.ctx))
2354                 return PTR_ERR(ext_data.ctx);
2355
2356         if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2357                 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2358                                            create_extensions,
2359                                            ARRAY_SIZE(create_extensions),
2360                                            &ext_data);
2361                 if (ret)
2362                         goto err_ctx;
2363         }
2364
2365         ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
2366         if (ret < 0)
2367                 goto err_ctx;
2368
2369         args->ctx_id = id;
2370         drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
2371
2372         return 0;
2373
2374 err_ctx:
2375         context_close(ext_data.ctx);
2376         return ret;
2377 }
2378
2379 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2380                                    struct drm_file *file)
2381 {
2382         struct drm_i915_gem_context_destroy *args = data;
2383         struct drm_i915_file_private *file_priv = file->driver_priv;
2384         struct i915_gem_context *ctx;
2385
2386         if (args->pad != 0)
2387                 return -EINVAL;
2388
2389         if (!args->ctx_id)
2390                 return -ENOENT;
2391
2392         ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2393         if (!ctx)
2394                 return -ENOENT;
2395
2396         context_close(ctx);
2397         return 0;
2398 }
2399
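/*
 * Read side of I915_CONTEXT_PARAM_SSEU: report the slice/subslice/EU
 * configuration currently stored for the selected engine. A zero-sized
 * argument only queries the required struct size.
 */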
2400 static int get_sseu(struct i915_gem_context *ctx,
2401                     struct drm_i915_gem_context_param *args)
2402 {
2403         struct drm_i915_gem_context_param_sseu user_sseu;
2404         struct intel_context *ce;
2405         unsigned long lookup;
2406         int err;
2407
2408         if (args->size == 0)
2409                 goto out;
2410         else if (args->size < sizeof(user_sseu))
2411                 return -EINVAL;
2412
2413         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2414                            sizeof(user_sseu)))
2415                 return -EFAULT;
2416
2417         if (user_sseu.rsvd)
2418                 return -EINVAL;
2419
2420         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2421                 return -EINVAL;
2422
2423         lookup = 0;
2424         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2425                 lookup |= LOOKUP_USER_INDEX;
2426
2427         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2428         if (IS_ERR(ce))
2429                 return PTR_ERR(ce);
2430
2431         err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2432         if (err) {
2433                 intel_context_put(ce);
2434                 return err;
2435         }
2436
2437         user_sseu.slice_mask = ce->sseu.slice_mask;
2438         user_sseu.subslice_mask = ce->sseu.subslice_mask;
2439         user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2440         user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2441
2442         intel_context_unlock_pinned(ce);
2443         intel_context_put(ce);
2444
2445         if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2446                          sizeof(user_sseu)))
2447                 return -EFAULT;
2448
2449 out:
2450         args->size = sizeof(user_sseu);
2451
2452         return 0;
2453 }
2454
2455 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2456                                     struct drm_file *file)
2457 {
2458         struct drm_i915_file_private *file_priv = file->driver_priv;
2459         struct drm_i915_gem_context_param *args = data;
2460         struct i915_gem_context *ctx;
2461         int ret = 0;
2462
2463         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2464         if (!ctx)
2465                 return -ENOENT;
2466
2467         switch (args->param) {
2468         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2469                 args->size = 0;
2470                 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2471                 break;
2472
2473         case I915_CONTEXT_PARAM_GTT_SIZE:
2474                 args->size = 0;
2475                 rcu_read_lock();
2476                 if (rcu_access_pointer(ctx->vm))
2477                         args->value = rcu_dereference(ctx->vm)->total;
2478                 else
2479                         args->value = to_i915(dev)->ggtt.vm.total;
2480                 rcu_read_unlock();
2481                 break;
2482
2483         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2484                 args->size = 0;
2485                 args->value = i915_gem_context_no_error_capture(ctx);
2486                 break;
2487
2488         case I915_CONTEXT_PARAM_BANNABLE:
2489                 args->size = 0;
2490                 args->value = i915_gem_context_is_bannable(ctx);
2491                 break;
2492
2493         case I915_CONTEXT_PARAM_RECOVERABLE:
2494                 args->size = 0;
2495                 args->value = i915_gem_context_is_recoverable(ctx);
2496                 break;
2497
2498         case I915_CONTEXT_PARAM_PRIORITY:
2499                 args->size = 0;
2500                 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2501                 break;
2502
2503         case I915_CONTEXT_PARAM_SSEU:
2504                 ret = get_sseu(ctx, args);
2505                 break;
2506
2507         case I915_CONTEXT_PARAM_VM:
2508                 ret = get_ppgtt(file_priv, ctx, args);
2509                 break;
2510
2511         case I915_CONTEXT_PARAM_ENGINES:
2512                 ret = get_engines(ctx, args);
2513                 break;
2514
2515         case I915_CONTEXT_PARAM_PERSISTENCE:
2516                 args->size = 0;
2517                 args->value = i915_gem_context_is_persistent(ctx);
2518                 break;
2519
2520         case I915_CONTEXT_PARAM_RINGSIZE:
2521                 ret = get_ringsize(ctx, args);
2522                 break;
2523
2524         case I915_CONTEXT_PARAM_BAN_PERIOD:
2525         default:
2526                 ret = -EINVAL;
2527                 break;
2528         }
2529
2530         i915_gem_context_put(ctx);
2531         return ret;
2532 }
2533
2534 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2535                                     struct drm_file *file)
2536 {
2537         struct drm_i915_file_private *file_priv = file->driver_priv;
2538         struct drm_i915_gem_context_param *args = data;
2539         struct i915_gem_context *ctx;
2540         int ret;
2541
2542         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2543         if (!ctx)
2544                 return -ENOENT;
2545
2546         ret = ctx_setparam(file_priv, ctx, args);
2547
2548         i915_gem_context_put(ctx);
2549         return ret;
2550 }
2551
2552 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2553                                        void *data, struct drm_file *file)
2554 {
2555         struct drm_i915_private *i915 = to_i915(dev);
2556         struct drm_i915_reset_stats *args = data;
2557         struct i915_gem_context *ctx;
2558         int ret;
2559
2560         if (args->flags || args->pad)
2561                 return -EINVAL;
2562
2563         ret = -ENOENT;
2564         rcu_read_lock();
2565         ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2566         if (!ctx)
2567                 goto out;
2568
2569         /*
2570          * We opt for unserialised reads here. This may result in tearing
2571          * in the extremely unlikely event of a GPU hang on this context
2572          * while we are querying its stats. If we need that extra layer of
2573          * protection, we should wrap the hangstats with a seqlock.
2574          */
2575
2576         if (capable(CAP_SYS_ADMIN))
2577                 args->reset_count = i915_reset_count(&i915->gpu_error);
2578         else
2579                 args->reset_count = 0;
2580
2581         args->batch_active = atomic_read(&ctx->guilty_count);
2582         args->batch_pending = atomic_read(&ctx->active_count);
2583
2584         ret = 0;
2585 out:
2586         rcu_read_unlock();
2587         return ret;
2588 }
2589
2590 /* GEM context-engines iterator: for_each_gem_engine() */
2591 struct intel_context *
2592 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2593 {
2594         const struct i915_gem_engines *e = it->engines;
2595         struct intel_context *ctx;
2596
2597         if (unlikely(!e))
2598                 return NULL;
2599
2600         do {
2601                 if (it->idx >= e->num_engines)
2602                         return NULL;
2603
2604                 ctx = e->engines[it->idx++];
2605         } while (!ctx);
2606
2607         return ctx;
2608 }
2609
2610 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2611 #include "selftests/mock_context.c"
2612 #include "selftests/i915_gem_context.c"
2613 #endif
2614
2615 static void i915_global_gem_context_shrink(void)
2616 {
2617         kmem_cache_shrink(global.slab_luts);
2618 }
2619
2620 static void i915_global_gem_context_exit(void)
2621 {
2622         kmem_cache_destroy(global.slab_luts);
2623 }
2624
2625 static struct i915_global_gem_context global = { {
2626         .shrink = i915_global_gem_context_shrink,
2627         .exit = i915_global_gem_context_exit,
2628 } };
2629
2630 int __init i915_global_gem_context_init(void)
2631 {
2632         global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2633         if (!global.slab_luts)
2634                 return -ENOMEM;
2635
2636         i915_global_register(&global.base);
2637         return 0;
2638 }