drivers/gpu/drm/i915/i915_gem_context.c
/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits RC6 (the GPU has its own internal power context, except
 * on gen5). Though something like a context does exist for the media ring,
 * the code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current context, in order to invoke a save of the context we
 * actually care about. In fact, the code could likely be constructed, albeit
 * in a more complicated fashion, to never use the default context, though
 * that limits the driver's ability to swap out, and/or destroy other
 * contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount   pincount   active
 * S0: initial state                           0          0         0
 * S1: context created                         1          0         0
 * S2: context is currently running            2          1         X
 * S3: GPU referenced, but not current         2          0         1
 * S4: context is current, but destroyed       1          1         0
 * S5: like S3, but destroyed                  1          0         1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits an execbuf with the context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: the context object was retired
 * S3->S2: the client submits another execbuf
 * S2->S4: context destroy is called on the current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on the current context
 *
 * There are two confusing terms used above:
 * The "current context" means the context which is currently running on the
 * GPU. The GPU has loaded its state already and has stored away the gtt
 * offset of the BO. The GPU is not actively referencing the data at this
 * offset, but it will on the next context switch. The only way to avoid this
 * is to do a GPU reset.
 *
 * An "active context" is one which was previously the "current context" and
 * is on the active list waiting for the next context switch to occur. Until
 * this happens, the object must remain at the same gtt offset. It is
 * therefore possible for a context to be destroyed while it is still active.
 */

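/*
 * Illustrative userspace sketch (not part of this driver): the create and
 * destroy half of the uAPI described above, as a client might exercise it.
 * It assumes an already-open DRM fd and libdrm's drmIoctl(); the helper
 * names and include paths are illustrative only, and the block is guarded
 * out so it is never built with the driver.
 */
#if 0
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static uint32_t example_context_create(int fd)
{
	struct drm_i915_gem_context_create create = { .pad = 0 };

	/* Serviced by i915_gem_context_create_ioctl() below. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
		return 0; /* ctx id 0 names the per-fd default context */

	return create.ctx_id;
}

static void example_context_destroy(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_destroy destroy = { .ctx_id = ctx_id };

	/* Serviced by i915_gem_context_destroy_ioctl() below. */
	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}
#endif
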
#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

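/*
 * Drop the per-context handle lookup state: free every handle entry on
 * ctx->handles_list and clear the handle->vma radix tree, dropping the
 * reference each entry held on its object (deferred if the object is
 * still active on the GPU).
 */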
d1b48c1e 96static void lut_close(struct i915_gem_context *ctx)
4ff4b44c 97{
d1b48c1e
CW
98 struct i915_lut_handle *lut, *ln;
99 struct radix_tree_iter iter;
100 void __rcu **slot;
101
102 list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
103 list_del(&lut->obj_link);
104 kmem_cache_free(ctx->i915->luts, lut);
4ff4b44c 105 }
4ff4b44c 106
547da76b 107 rcu_read_lock();
d1b48c1e
CW
108 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
109 struct i915_vma *vma = rcu_dereference_raw(*slot);
4ff4b44c 110
d1b48c1e 111 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
94dec871 112 __i915_gem_object_release_unless_active(vma->obj);
4ff4b44c 113 }
547da76b 114 rcu_read_unlock();
4ff4b44c
CW
115}
116
5f09a9c8 117static void i915_gem_context_free(struct i915_gem_context *ctx)
40521054 118{
bca44d80 119 int i;
40521054 120
91c8a326 121 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
6095868a 122 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
198c974d 123
ae6c4806
DV
124 i915_ppgtt_put(ctx->ppgtt);
125
bca44d80
CW
126 for (i = 0; i < I915_NUM_ENGINES; i++) {
127 struct intel_context *ce = &ctx->engine[i];
128
129 if (!ce->state)
130 continue;
131
132 WARN_ON(ce->pin_count);
dca33ecc 133 if (ce->ring)
7e37f889 134 intel_ring_free(ce->ring);
bca44d80 135
f8a7fde4 136 __i915_gem_object_release_unless_active(ce->state->obj);
bca44d80
CW
137 }
138
562f5d45 139 kfree(ctx->name);
c84455b4 140 put_pid(ctx->pid);
4ff4b44c 141
c7c48dfd 142 list_del(&ctx->link);
5d1808ec 143
829a0af2 144 ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
1acfc104 145 kfree_rcu(ctx, rcu);
40521054
BW
146}
147
5f09a9c8
CW
148static void contexts_free(struct drm_i915_private *i915)
149{
150 struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
fad20834 151 struct i915_gem_context *ctx, *cn;
5f09a9c8
CW
152
153 lockdep_assert_held(&i915->drm.struct_mutex);
154
fad20834 155 llist_for_each_entry_safe(ctx, cn, freed, free_link)
5f09a9c8
CW
156 i915_gem_context_free(ctx);
157}
158
cb0aeaa8
CW
159static void contexts_free_first(struct drm_i915_private *i915)
160{
161 struct i915_gem_context *ctx;
162 struct llist_node *freed;
163
164 lockdep_assert_held(&i915->drm.struct_mutex);
165
166 freed = llist_del_first(&i915->contexts.free_list);
167 if (!freed)
168 return;
169
170 ctx = container_of(freed, typeof(*ctx), free_link);
171 i915_gem_context_free(ctx);
172}
173
5f09a9c8
CW
174static void contexts_free_worker(struct work_struct *work)
175{
176 struct drm_i915_private *i915 =
177 container_of(work, typeof(*i915), contexts.free_work);
178
179 mutex_lock(&i915->drm.struct_mutex);
180 contexts_free(i915);
181 mutex_unlock(&i915->drm.struct_mutex);
182}
183
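/*
 * Final kref release.  Freeing a context requires struct_mutex, but the last
 * reference can be dropped from any path, so the context is queued on a
 * lockless free_list and reaped later by contexts_free_worker() (or
 * opportunistically via contexts_free_first() on the next context create).
 */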
184void i915_gem_context_release(struct kref *ref)
185{
186 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
187 struct drm_i915_private *i915 = ctx->i915;
188
189 trace_i915_context_free(ctx);
190 if (llist_add(&ctx->free_link, &i915->contexts.free_list))
191 queue_work(i915->wq, &i915->contexts.free_work);
192}
193
50e046b6
CW
194static void context_close(struct i915_gem_context *ctx)
195{
6095868a 196 i915_gem_context_set_closed(ctx);
d1b48c1e 197
94dec871
CW
198 /*
199 * The LUT uses the VMA as a backpointer to unref the object,
200 * so we need to clear the LUT before we close all the VMA (inside
201 * the ppgtt).
202 */
d1b48c1e 203 lut_close(ctx);
50e046b6
CW
204 if (ctx->ppgtt)
205 i915_ppgtt_close(&ctx->ppgtt->base);
d1b48c1e 206
50e046b6
CW
207 ctx->file_priv = ERR_PTR(-EBADF);
208 i915_gem_context_put(ctx);
209}
210
5d1808ec
CW
211static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
212{
213 int ret;
214
829a0af2 215 ret = ida_simple_get(&dev_priv->contexts.hw_ida,
5d1808ec
CW
216 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
217 if (ret < 0) {
218 /* Contexts are only released when no longer active.
219 * Flush any pending retires to hopefully release some
220 * stale contexts and try again.
221 */
c033666a 222 i915_gem_retire_requests(dev_priv);
829a0af2 223 ret = ida_simple_get(&dev_priv->contexts.hw_ida,
5d1808ec
CW
224 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
225 if (ret < 0)
226 return ret;
227 }
228
229 *out = ret;
230 return 0;
231}
232
949e8ab3
CW
233static u32 default_desc_template(const struct drm_i915_private *i915,
234 const struct i915_hw_ppgtt *ppgtt)
2355cf08 235{
949e8ab3 236 u32 address_mode;
2355cf08
MK
237 u32 desc;
238
949e8ab3 239 desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
2355cf08 240
949e8ab3
CW
241 address_mode = INTEL_LEGACY_32B_CONTEXT;
242 if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
243 address_mode = INTEL_LEGACY_64B_CONTEXT;
244 desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
245
246 if (IS_GEN8(i915))
2355cf08
MK
247 desc |= GEN8_CTX_L3LLC_COHERENT;
248
249 /* TODO: WaDisableLiteRestore when we start using semaphore
250 * signalling between Command Streamers
251 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
252 */
253
254 return desc;
255}
256
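/*
 * Allocate and minimally initialise a new context: pick a hw_id, register a
 * user handle in the file's idr (when created on behalf of a client), and
 * set safe defaults (bannable, normal priority, 4-page ring, descriptor
 * based on the aliasing ppgtt).  i915_gem_create_context() layers a full
 * ppgtt on top where supported.
 */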
e2efd130 257static struct i915_gem_context *
bf9e8429 258__create_hw_context(struct drm_i915_private *dev_priv,
ee960be7 259 struct drm_i915_file_private *file_priv)
40521054 260{
e2efd130 261 struct i915_gem_context *ctx;
c8c470af 262 int ret;
40521054 263
f94982b0 264 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
146937e5
BW
265 if (ctx == NULL)
266 return ERR_PTR(-ENOMEM);
40521054 267
5d1808ec
CW
268 ret = assign_hw_id(dev_priv, &ctx->hw_id);
269 if (ret) {
270 kfree(ctx);
271 return ERR_PTR(ret);
272 }
273
dce3271b 274 kref_init(&ctx->ref);
829a0af2 275 list_add_tail(&ctx->link, &dev_priv->contexts.list);
9ea4feec 276 ctx->i915 = dev_priv;
e4f815f6 277 ctx->priority = I915_PRIORITY_NORMAL;
40521054 278
d1b48c1e
CW
279 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
280 INIT_LIST_HEAD(&ctx->handles_list);
4ff4b44c 281
40521054 282 /* Default context will never have a file_priv */
562f5d45
CW
283 ret = DEFAULT_CONTEXT_HANDLE;
284 if (file_priv) {
691e6415 285 ret = idr_alloc(&file_priv->context_idr, ctx,
821d66dd 286 DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
691e6415 287 if (ret < 0)
4ff4b44c 288 goto err_lut;
562f5d45
CW
289 }
290 ctx->user_handle = ret;
dce3271b
MK
291
292 ctx->file_priv = file_priv;
562f5d45 293 if (file_priv) {
c84455b4 294 ctx->pid = get_task_pid(current, PIDTYPE_PID);
562f5d45
CW
295 ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
296 current->comm,
297 pid_nr(ctx->pid),
298 ctx->user_handle);
299 if (!ctx->name) {
300 ret = -ENOMEM;
301 goto err_pid;
302 }
303 }
c84455b4 304
3ccfd19d
BW
305 /* NB: Mark all slices as needing a remap so that when the context first
306 * loads it will restore whatever remap state already exists. If there
307 * is no remap info, it will be a NOP. */
b2e862d0 308 ctx->remap_slice = ALL_L3_SLICES(dev_priv);
40521054 309
6095868a 310 i915_gem_context_set_bannable(ctx);
bcd794c2 311 ctx->ring_size = 4 * PAGE_SIZE;
949e8ab3
CW
312 ctx->desc_template =
313 default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
676fa572 314
d3ef1af6
DCS
315 /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
316 * present or not in use we still need a small bias as ring wraparound
317 * at offset 0 sometimes hangs. No idea why.
318 */
4f044a88 319 if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
d3ef1af6
DCS
320 ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
321 else
f51455d4 322 ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
d3ef1af6 323
146937e5 324 return ctx;
40521054 325
562f5d45
CW
326err_pid:
327 put_pid(ctx->pid);
328 idr_remove(&file_priv->context_idr, ctx->user_handle);
4ff4b44c 329err_lut:
50e046b6 330 context_close(ctx);
146937e5 331 return ERR_PTR(ret);
40521054
BW
332}
333
6d1f9fb3
JL
334static void __destroy_hw_context(struct i915_gem_context *ctx,
335 struct drm_i915_file_private *file_priv)
336{
337 idr_remove(&file_priv->context_idr, ctx->user_handle);
338 context_close(ctx);
339}
340
254f965c
BW
 341/*
342 * The default context needs to exist per ring that uses contexts. It stores the
343 * context state of the GPU for applications that don't utilize HW contexts, as
344 * well as an idle case.
345 */
e2efd130 346static struct i915_gem_context *
bf9e8429 347i915_gem_create_context(struct drm_i915_private *dev_priv,
d624d86e 348 struct drm_i915_file_private *file_priv)
254f965c 349{
e2efd130 350 struct i915_gem_context *ctx;
40521054 351
bf9e8429 352 lockdep_assert_held(&dev_priv->drm.struct_mutex);
40521054 353
cb0aeaa8
CW
354 /* Reap the most stale context */
355 contexts_free_first(dev_priv);
ddfc9258 356
bf9e8429 357 ctx = __create_hw_context(dev_priv, file_priv);
146937e5 358 if (IS_ERR(ctx))
a45d0f6a 359 return ctx;
40521054 360
bf9e8429 361 if (USES_FULL_PPGTT(dev_priv)) {
80b204bc 362 struct i915_hw_ppgtt *ppgtt;
bdf4fd7e 363
bf9e8429 364 ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
c6aab916 365 if (IS_ERR(ppgtt)) {
0eea67eb
BW
366 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
367 PTR_ERR(ppgtt));
6d1f9fb3 368 __destroy_hw_context(ctx, file_priv);
c6aab916 369 return ERR_CAST(ppgtt);
ae6c4806
DV
370 }
371
372 ctx->ppgtt = ppgtt;
949e8ab3 373 ctx->desc_template = default_desc_template(dev_priv, ppgtt);
ae6c4806 374 }
bdf4fd7e 375
198c974d
DCS
376 trace_i915_context_create(ctx);
377
a45d0f6a 378 return ctx;
254f965c
BW
379}
380
c8c35799
ZW
381/**
382 * i915_gem_context_create_gvt - create a GVT GEM context
 383 * @dev: drm device pointer
384 *
385 * This function is used to create a GVT specific GEM context.
386 *
387 * Returns:
388 * pointer to i915_gem_context on success, error pointer if failed
389 *
390 */
391struct i915_gem_context *
392i915_gem_context_create_gvt(struct drm_device *dev)
393{
394 struct i915_gem_context *ctx;
395 int ret;
396
397 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
398 return ERR_PTR(-ENODEV);
399
400 ret = i915_mutex_lock_interruptible(dev);
401 if (ret)
402 return ERR_PTR(ret);
403
984ff29f 404 ctx = __create_hw_context(to_i915(dev), NULL);
c8c35799
ZW
405 if (IS_ERR(ctx))
406 goto out;
407
984ff29f 408 ctx->file_priv = ERR_PTR(-EBADF);
6095868a
CW
409 i915_gem_context_set_closed(ctx); /* not user accessible */
410 i915_gem_context_clear_bannable(ctx);
411 i915_gem_context_set_force_single_submission(ctx);
4f044a88 412 if (!i915_modparams.enable_guc_submission)
718e884a 413 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
984ff29f
CW
414
415 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
c8c35799
ZW
416out:
417 mutex_unlock(&dev->struct_mutex);
418 return ctx;
419}
420
d2b4b979
CW
421struct i915_gem_context *
422i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
e7af3116
CW
423{
424 struct i915_gem_context *ctx;
425
426 ctx = i915_gem_create_context(i915, NULL);
427 if (IS_ERR(ctx))
428 return ctx;
429
430 i915_gem_context_clear_bannable(ctx);
431 ctx->priority = prio;
432 ctx->ring_size = PAGE_SIZE;
433
434 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
435
436 return ctx;
437}
438
439static void
440destroy_kernel_context(struct i915_gem_context **ctxp)
441{
442 struct i915_gem_context *ctx;
443
444 /* Keep the context ref so that we can free it immediately ourselves */
445 ctx = i915_gem_context_get(fetch_and_zero(ctxp));
446 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
447
448 context_close(ctx);
449 i915_gem_context_free(ctx);
450}
451
829a0af2 452int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
254f965c 453{
e2efd130 454 struct i915_gem_context *ctx;
e7af3116 455 int err;
254f965c 456
e7af3116 457 GEM_BUG_ON(dev_priv->kernel_context);
254f965c 458
829a0af2 459 INIT_LIST_HEAD(&dev_priv->contexts.list);
5f09a9c8
CW
460 INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
461 init_llist_head(&dev_priv->contexts.free_list);
829a0af2 462
c033666a
CW
463 if (intel_vgpu_active(dev_priv) &&
464 HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
4f044a88 465 if (!i915_modparams.enable_execlists) {
a0bd6c31
ZL
466 DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
467 return -EINVAL;
468 }
469 }
470
5d1808ec
CW
471 /* Using the simple ida interface, the max is limited by sizeof(int) */
472 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
829a0af2 473 ida_init(&dev_priv->contexts.hw_ida);
5d1808ec 474
e7af3116 475 /* lowest priority; idle task */
d2b4b979 476 ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
691e6415 477 if (IS_ERR(ctx)) {
e7af3116
CW
478 DRM_ERROR("Failed to create default global context\n");
479 err = PTR_ERR(ctx);
480 goto err;
254f965c 481 }
e7af3116
CW
482 /*
 483 * For easy recognisability, we want the kernel context to be 0 and then
5d12fcef
CW
484 * all user contexts will have non-zero hw_id.
485 */
486 GEM_BUG_ON(ctx->hw_id);
ed54c1a1 487 dev_priv->kernel_context = ctx;
67e3d297 488
e7af3116 489 /* highest priority; preempting task */
d2b4b979 490 ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
e7af3116
CW
491 if (IS_ERR(ctx)) {
492 DRM_ERROR("Failed to create default preempt context\n");
493 err = PTR_ERR(ctx);
494 goto err_kernel_context;
495 }
496 dev_priv->preempt_context = ctx;
984ff29f 497
ede7d42b 498 DRM_DEBUG_DRIVER("%s context support initialized\n",
63ffbcda
JL
499 dev_priv->engine[RCS]->context_size ? "logical" :
500 "fake");
8245be31 501 return 0;
e7af3116
CW
502
503err_kernel_context:
504 destroy_kernel_context(&dev_priv->kernel_context);
505err:
506 return err;
254f965c
BW
507}
508
829a0af2 509void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
b2e862d0
CW
510{
511 struct intel_engine_cs *engine;
3b3f1650 512 enum intel_engine_id id;
b2e862d0 513
91c8a326 514 lockdep_assert_held(&dev_priv->drm.struct_mutex);
499f2697 515
3b3f1650 516 for_each_engine(engine, dev_priv, id) {
e8a9c58f
CW
517 engine->legacy_active_context = NULL;
518
519 if (!engine->last_retired_context)
520 continue;
521
522 engine->context_unpin(engine, engine->last_retired_context);
523 engine->last_retired_context = NULL;
b2e862d0 524 }
b2e862d0
CW
525}
526
5f09a9c8 527void i915_gem_contexts_fini(struct drm_i915_private *i915)
254f965c 528{
5f09a9c8 529 lockdep_assert_held(&i915->drm.struct_mutex);
984ff29f 530
e7af3116
CW
531 destroy_kernel_context(&i915->preempt_context);
532 destroy_kernel_context(&i915->kernel_context);
5d1808ec 533
5f09a9c8
CW
534 /* Must free all deferred contexts (via flush_workqueue) first */
535 ida_destroy(&i915->contexts.hw_ida);
254f965c
BW
536}
537
40521054
BW
538static int context_idr_cleanup(int id, void *p, void *data)
539{
e2efd130 540 struct i915_gem_context *ctx = p;
40521054 541
50e046b6 542 context_close(ctx);
40521054 543 return 0;
254f965c
BW
544}
545
829a0af2
CW
546int i915_gem_context_open(struct drm_i915_private *i915,
547 struct drm_file *file)
e422b888
BW
548{
549 struct drm_i915_file_private *file_priv = file->driver_priv;
e2efd130 550 struct i915_gem_context *ctx;
e422b888
BW
551
552 idr_init(&file_priv->context_idr);
553
829a0af2
CW
554 mutex_lock(&i915->drm.struct_mutex);
555 ctx = i915_gem_create_context(i915, file_priv);
556 mutex_unlock(&i915->drm.struct_mutex);
f83d6518 557 if (IS_ERR(ctx)) {
0eea67eb 558 idr_destroy(&file_priv->context_idr);
f83d6518 559 return PTR_ERR(ctx);
0eea67eb
BW
560 }
561
e4d5dc21
CW
562 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
563
e422b888
BW
564 return 0;
565}
566
829a0af2 567void i915_gem_context_close(struct drm_file *file)
254f965c 568{
40521054 569 struct drm_i915_file_private *file_priv = file->driver_priv;
254f965c 570
829a0af2 571 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
499f2697 572
73c273eb 573 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
40521054 574 idr_destroy(&file_priv->context_idr);
40521054
BW
575}
576
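/*
 * Emit the MI_SET_CONTEXT sequence used for legacy (ring buffer) context
 * switches.  On gen7 the switch is bracketed by MI_ARB_ON_OFF, and when
 * cross-engine semaphore signalling is enabled a workaround toggles
 * GEN6_PSMI_SLEEP_MSG_DISABLE on every other engine around the switch.
 */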
e0556841 577static inline int
e555e326 578mi_set_context(struct drm_i915_gem_request *req, u32 flags)
e0556841 579{
c033666a 580 struct drm_i915_private *dev_priv = req->i915;
4a570db5 581 struct intel_engine_cs *engine = req->engine;
3b3f1650 582 enum intel_engine_id id;
2c550183 583 const int num_rings =
e02d9d76 584 /* Use an extended w/a on gen7 if signalling from other rings */
4f044a88 585 (i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?
c1bb1145 586 INTEL_INFO(dev_priv)->num_rings - 1 :
2c550183 587 0;
a937eaf8 588 int len;
e555e326 589 u32 *cs;
e0556841 590
e555e326 591 flags |= MI_MM_SPACE_GTT;
c033666a 592 if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
e555e326
CW
593 /* These flags are for resource streamer on HSW+ */
594 flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
595 else
596 flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
2c550183
CW
597
598 len = 4;
c033666a 599 if (INTEL_GEN(dev_priv) >= 7)
e9135c4f 600 len += 2 + (num_rings ? 4*num_rings + 6 : 0);
2c550183 601
73dec95e
TU
602 cs = intel_ring_begin(req, len);
603 if (IS_ERR(cs))
604 return PTR_ERR(cs);
e0556841 605
b3f797ac 606 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
c033666a 607 if (INTEL_GEN(dev_priv) >= 7) {
73dec95e 608 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
2c550183
CW
609 if (num_rings) {
610 struct intel_engine_cs *signaller;
611
73dec95e 612 *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
3b3f1650 613 for_each_engine(signaller, dev_priv, id) {
e2f80391 614 if (signaller == engine)
2c550183
CW
615 continue;
616
73dec95e
TU
617 *cs++ = i915_mmio_reg_offset(
618 RING_PSMI_CTL(signaller->mmio_base));
619 *cs++ = _MASKED_BIT_ENABLE(
620 GEN6_PSMI_SLEEP_MSG_DISABLE);
2c550183
CW
621 }
622 }
623 }
e37ec39b 624
73dec95e
TU
625 *cs++ = MI_NOOP;
626 *cs++ = MI_SET_CONTEXT;
627 *cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
2b7e8082
VS
628 /*
629 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
630 * WaMiSetContext_Hang:snb,ivb,vlv
631 */
73dec95e 632 *cs++ = MI_NOOP;
e0556841 633
c033666a 634 if (INTEL_GEN(dev_priv) >= 7) {
2c550183
CW
635 if (num_rings) {
636 struct intel_engine_cs *signaller;
e9135c4f 637 i915_reg_t last_reg = {}; /* keep gcc quiet */
2c550183 638
73dec95e 639 *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
3b3f1650 640 for_each_engine(signaller, dev_priv, id) {
e2f80391 641 if (signaller == engine)
2c550183
CW
642 continue;
643
e9135c4f 644 last_reg = RING_PSMI_CTL(signaller->mmio_base);
73dec95e
TU
645 *cs++ = i915_mmio_reg_offset(last_reg);
646 *cs++ = _MASKED_BIT_DISABLE(
647 GEN6_PSMI_SLEEP_MSG_DISABLE);
2c550183 648 }
e9135c4f
CW
649
650 /* Insert a delay before the next switch! */
73dec95e
TU
651 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
652 *cs++ = i915_mmio_reg_offset(last_reg);
653 *cs++ = i915_ggtt_offset(engine->scratch);
654 *cs++ = MI_NOOP;
2c550183 655 }
73dec95e 656 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2c550183 657 }
e37ec39b 658
73dec95e 659 intel_ring_advance(req, cs);
e0556841 660
a937eaf8 661 return 0;
e0556841
BW
662}
663
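/*
 * Re-emit the GEN7_L3LOG remapping registers for one L3 slice via
 * MI_LOAD_REGISTER_IMM, restoring any remap information provided through
 * the l3_parity interface after a context switch.
 */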
d200cda6 664static int remap_l3(struct drm_i915_gem_request *req, int slice)
b0ebde39 665{
73dec95e
TU
666 u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
667 int i;
b0ebde39 668
ff55b5e8 669 if (!remap_info)
b0ebde39
CW
670 return 0;
671
73dec95e
TU
672 cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
673 if (IS_ERR(cs))
674 return PTR_ERR(cs);
b0ebde39
CW
675
676 /*
677 * Note: We do not worry about the concurrent register cacheline hang
678 * here because no other code should access these registers other than
679 * at initialization time.
680 */
73dec95e 681 *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
ff55b5e8 682 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
73dec95e
TU
683 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
684 *cs++ = remap_info[i];
b0ebde39 685 }
73dec95e
TU
686 *cs++ = MI_NOOP;
687 intel_ring_advance(req, cs);
b0ebde39 688
ff55b5e8 689 return 0;
b0ebde39
CW
690}
691
f9326be5
CW
692static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
693 struct intel_engine_cs *engine,
e2efd130 694 struct i915_gem_context *to)
317b4e90 695{
563222a7
BW
696 if (to->remap_slice)
697 return false;
698
f9326be5 699 if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
fcb5106d 700 return false;
317b4e90 701
e8a9c58f 702 return to == engine->legacy_active_context;
317b4e90
BW
703}
704
705static bool
12124bea 706needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
317b4e90 707{
12124bea
CW
708 struct i915_gem_context *from = engine->legacy_active_context;
709
f9326be5 710 if (!ppgtt)
317b4e90
BW
711 return false;
712
f9326be5 713 /* Always load the ppgtt on first use */
12124bea 714 if (!from)
f9326be5
CW
715 return true;
716
717 /* Same context without new entries, skip */
12124bea 718 if ((!from->ppgtt || from->ppgtt == ppgtt) &&
f9326be5 719 !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
e1a8daa2
CW
720 return false;
721
722 if (engine->id != RCS)
317b4e90
BW
723 return true;
724
c033666a 725 if (INTEL_GEN(engine->i915) < 8)
317b4e90
BW
726 return true;
727
728 return false;
729}
730
731static bool
f9326be5 732needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
e2efd130 733 struct i915_gem_context *to,
f9326be5 734 u32 hw_flags)
317b4e90 735{
f9326be5 736 if (!ppgtt)
317b4e90
BW
737 return false;
738
fcb5106d 739 if (!IS_GEN8(to->i915))
317b4e90
BW
740 return false;
741
6702cf16 742 if (hw_flags & MI_RESTORE_INHIBIT)
317b4e90
BW
743 return true;
744
745 return false;
746}
747
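/*
 * Full legacy context switch on the render engine: load the ppgtt page
 * directories first where required, emit MI_SET_CONTEXT, reload the PDPs
 * afterwards on gen8 when needed, and finally re-emit any outstanding L3
 * slice remaps.
 */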
e1a8daa2 748static int do_rcs_switch(struct drm_i915_gem_request *req)
e0556841 749{
e2efd130 750 struct i915_gem_context *to = req->ctx;
4a570db5 751 struct intel_engine_cs *engine = req->engine;
f9326be5 752 struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
e8a9c58f 753 struct i915_gem_context *from = engine->legacy_active_context;
fcb5106d 754 u32 hw_flags;
3ccfd19d 755 int ret, i;
e0556841 756
e8a9c58f
CW
757 GEM_BUG_ON(engine->id != RCS);
758
f9326be5 759 if (skip_rcs_switch(ppgtt, engine, to))
9a3b5304
CW
760 return 0;
761
12124bea 762 if (needs_pd_load_pre(ppgtt, engine)) {
fcb5106d
CW
763 /* Older GENs and non render rings still want the load first,
764 * "PP_DCLV followed by PP_DIR_BASE register through Load
765 * Register Immediate commands in Ring Buffer before submitting
766 * a context."*/
767 trace_switch_mm(engine, to);
f9326be5 768 ret = ppgtt->switch_mm(ppgtt, req);
fcb5106d 769 if (ret)
e8a9c58f 770 return ret;
fcb5106d
CW
771 }
772
d2b4b979
CW
773 if (i915_gem_context_is_kernel(to))
774 /*
775 * The kernel context(s) is treated as pure scratch and is not
776 * expected to retain any state (as we sacrifice it during
777 * suspend and on resume it may be corrupted). This is ok,
778 * as nothing actually executes using the kernel context; it
779 * is purely used for flushing user contexts.
780 */
fcb5106d 781 hw_flags = MI_RESTORE_INHIBIT;
f9326be5 782 else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
fcb5106d
CW
783 hw_flags = MI_FORCE_RESTORE;
784 else
785 hw_flags = 0;
e0556841 786
fcb5106d
CW
787 if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
788 ret = mi_set_context(req, hw_flags);
3ccfd19d 789 if (ret)
e8a9c58f 790 return ret;
3ccfd19d 791
e8a9c58f 792 engine->legacy_active_context = to;
e0556841 793 }
e0556841 794
fcb5106d
CW
795 /* GEN8 does *not* require an explicit reload if the PDPs have been
796 * setup, and we do not wish to move them.
797 */
f9326be5 798 if (needs_pd_load_post(ppgtt, to, hw_flags)) {
fcb5106d 799 trace_switch_mm(engine, to);
f9326be5 800 ret = ppgtt->switch_mm(ppgtt, req);
fcb5106d
CW
801 /* The hardware context switch is emitted, but we haven't
802 * actually changed the state - so it's probably safe to bail
803 * here. Still, let the user know something dangerous has
804 * happened.
805 */
806 if (ret)
807 return ret;
808 }
809
f9326be5
CW
810 if (ppgtt)
811 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
fcb5106d
CW
812
813 for (i = 0; i < MAX_L3_SLICES; i++) {
814 if (!(to->remap_slice & (1<<i)))
815 continue;
816
d200cda6 817 ret = remap_l3(req, i);
fcb5106d
CW
818 if (ret)
819 return ret;
820
821 to->remap_slice &= ~(1<<i);
822 }
823
e0556841
BW
824 return 0;
825}
826
827/**
828 * i915_switch_context() - perform a GPU context switch.
ba01cc93 829 * @req: request for which we'll execute the context switch
e0556841
BW
830 *
831 * The context life cycle is simple. The context refcount is incremented and
 832 * decremented by 1 on create and destroy. If the context is in use by the GPU,
ecdb5fd8 833 * it will have a refcount > 1. This allows us to destroy the context abstract
e0556841 834 * object while letting the normal object tracking destroy the backing BO.
ecdb5fd8
TD
835 *
836 * This function should not be used in execlists mode. Instead the context is
837 * switched by writing to the ELSP and requests keep a reference to their
838 * context.
e0556841 839 */
ba01cc93 840int i915_switch_context(struct drm_i915_gem_request *req)
e0556841 841{
4a570db5 842 struct intel_engine_cs *engine = req->engine;
e0556841 843
91c8a326 844 lockdep_assert_held(&req->i915->drm.struct_mutex);
4f044a88 845 if (i915_modparams.enable_execlists)
5b043f4e 846 return 0;
0eea67eb 847
bca44d80 848 if (!req->ctx->engine[engine->id].state) {
e2efd130 849 struct i915_gem_context *to = req->ctx;
f9326be5
CW
850 struct i915_hw_ppgtt *ppgtt =
851 to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
e1a8daa2 852
12124bea 853 if (needs_pd_load_pre(ppgtt, engine)) {
e1a8daa2
CW
854 int ret;
855
856 trace_switch_mm(engine, to);
f9326be5 857 ret = ppgtt->switch_mm(ppgtt, req);
e1a8daa2
CW
858 if (ret)
859 return ret;
860
f9326be5 861 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
e1a8daa2
CW
862 }
863
12124bea 864 engine->legacy_active_context = to;
c482972a 865 return 0;
a95f6a00 866 }
c482972a 867
e1a8daa2 868 return do_rcs_switch(req);
e0556841 869}
84624813 870
20ccd4d3 871static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
f131e356
CW
872{
873 struct i915_gem_timeline *timeline;
874
875 list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
876 struct intel_timeline *tl;
877
878 if (timeline == &engine->i915->gt.global_timeline)
879 continue;
880
881 tl = &timeline->engine[engine->id];
882 if (i915_gem_active_peek(&tl->last_request,
883 &engine->i915->drm.struct_mutex))
884 return false;
885 }
886
20ccd4d3 887 return intel_engine_has_kernel_context(engine);
f131e356
CW
888}
889
945657b4
CW
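/*
 * Switch every engine to the kernel context, queued behind all currently
 * outstanding requests, so that no user context state remains pinned on the
 * GPU (used on the idle and suspend paths).
 */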
890int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
891{
892 struct intel_engine_cs *engine;
3033acab 893 struct i915_gem_timeline *timeline;
3b3f1650 894 enum intel_engine_id id;
945657b4 895
3033acab
CW
896 lockdep_assert_held(&dev_priv->drm.struct_mutex);
897
f131e356
CW
898 i915_gem_retire_requests(dev_priv);
899
3b3f1650 900 for_each_engine(engine, dev_priv, id) {
945657b4
CW
901 struct drm_i915_gem_request *req;
902 int ret;
903
20ccd4d3 904 if (engine_has_idle_kernel_context(engine))
f131e356
CW
905 continue;
906
945657b4
CW
907 req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
908 if (IS_ERR(req))
909 return PTR_ERR(req);
910
3033acab
CW
911 /* Queue this switch after all other activity */
912 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
913 struct drm_i915_gem_request *prev;
914 struct intel_timeline *tl;
915
916 tl = &timeline->engine[engine->id];
917 prev = i915_gem_active_raw(&tl->last_request,
918 &dev_priv->drm.struct_mutex);
919 if (prev)
920 i915_sw_fence_await_sw_fence_gfp(&req->submit,
921 &prev->submit,
922 GFP_KERNEL);
923 }
924
5b043f4e 925 ret = i915_switch_context(req);
e642c85b 926 i915_add_request(req);
945657b4
CW
927 if (ret)
928 return ret;
929 }
930
931 return 0;
932}
933
b083a087
MK
934static bool client_is_banned(struct drm_i915_file_private *file_priv)
935{
77b25a97 936 return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
b083a087
MK
937}
938
84624813
BW
939int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
940 struct drm_file *file)
941{
63ffbcda 942 struct drm_i915_private *dev_priv = to_i915(dev);
84624813
BW
943 struct drm_i915_gem_context_create *args = data;
944 struct drm_i915_file_private *file_priv = file->driver_priv;
e2efd130 945 struct i915_gem_context *ctx;
84624813
BW
946 int ret;
947
63ffbcda 948 if (!dev_priv->engine[RCS]->context_size)
5fa8be65
DV
949 return -ENODEV;
950
b31e5136
CW
951 if (args->pad != 0)
952 return -EINVAL;
953
b083a087
MK
954 if (client_is_banned(file_priv)) {
955 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
956 current->comm,
957 pid_nr(get_task_pid(current, PIDTYPE_PID)));
958
959 return -EIO;
960 }
961
84624813
BW
962 ret = i915_mutex_lock_interruptible(dev);
963 if (ret)
964 return ret;
965
63ffbcda 966 ctx = i915_gem_create_context(dev_priv, file_priv);
84624813 967 mutex_unlock(&dev->struct_mutex);
be636387
DC
968 if (IS_ERR(ctx))
969 return PTR_ERR(ctx);
84624813 970
984ff29f
CW
971 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
972
821d66dd 973 args->ctx_id = ctx->user_handle;
b84cf536 974 DRM_DEBUG("HW context %d created\n", args->ctx_id);
84624813 975
be636387 976 return 0;
84624813
BW
977}
978
979int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
980 struct drm_file *file)
981{
982 struct drm_i915_gem_context_destroy *args = data;
983 struct drm_i915_file_private *file_priv = file->driver_priv;
e2efd130 984 struct i915_gem_context *ctx;
84624813
BW
985 int ret;
986
b31e5136
CW
987 if (args->pad != 0)
988 return -EINVAL;
989
821d66dd 990 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
c2cf2416 991 return -ENOENT;
0eea67eb 992
ca585b5d 993 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
1acfc104
CW
994 if (!ctx)
995 return -ENOENT;
996
997 ret = mutex_lock_interruptible(&dev->struct_mutex);
998 if (ret)
999 goto out;
84624813 1000
6d1f9fb3 1001 __destroy_hw_context(ctx, file_priv);
84624813
BW
1002 mutex_unlock(&dev->struct_mutex);
1003
1acfc104
CW
1004out:
1005 i915_gem_context_put(ctx);
84624813
BW
1006 return 0;
1007}
c9dc0f35
CW
1008
1009int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
1010 struct drm_file *file)
1011{
1012 struct drm_i915_file_private *file_priv = file->driver_priv;
1013 struct drm_i915_gem_context_param *args = data;
e2efd130 1014 struct i915_gem_context *ctx;
1acfc104 1015 int ret = 0;
c9dc0f35 1016
ca585b5d 1017 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
1acfc104
CW
1018 if (!ctx)
1019 return -ENOENT;
c9dc0f35
CW
1020
1021 args->size = 0;
1022 switch (args->param) {
1023 case I915_CONTEXT_PARAM_BAN_PERIOD:
84102171 1024 ret = -EINVAL;
c9dc0f35 1025 break;
b1b38278
DW
1026 case I915_CONTEXT_PARAM_NO_ZEROMAP:
1027 args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
1028 break;
fa8848f2
CW
1029 case I915_CONTEXT_PARAM_GTT_SIZE:
1030 if (ctx->ppgtt)
1031 args->value = ctx->ppgtt->base.total;
1032 else if (to_i915(dev)->mm.aliasing_ppgtt)
1033 args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
1034 else
62106b4f 1035 args->value = to_i915(dev)->ggtt.base.total;
fa8848f2 1036 break;
bc3d6744 1037 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
6095868a 1038 args->value = i915_gem_context_no_error_capture(ctx);
bc3d6744 1039 break;
84102171 1040 case I915_CONTEXT_PARAM_BANNABLE:
6095868a 1041 args->value = i915_gem_context_is_bannable(ctx);
84102171 1042 break;
ac14fbd4
CW
1043 case I915_CONTEXT_PARAM_PRIORITY:
1044 args->value = ctx->priority;
1045 break;
c9dc0f35
CW
1046 default:
1047 ret = -EINVAL;
1048 break;
1049 }
c9dc0f35 1050
1acfc104 1051 i915_gem_context_put(ctx);
c9dc0f35
CW
1052 return ret;
1053}
1054
1055int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
1056 struct drm_file *file)
1057{
1058 struct drm_i915_file_private *file_priv = file->driver_priv;
1059 struct drm_i915_gem_context_param *args = data;
e2efd130 1060 struct i915_gem_context *ctx;
c9dc0f35
CW
1061 int ret;
1062
1acfc104
CW
1063 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
1064 if (!ctx)
1065 return -ENOENT;
1066
c9dc0f35
CW
1067 ret = i915_mutex_lock_interruptible(dev);
1068 if (ret)
1acfc104 1069 goto out;
c9dc0f35
CW
1070
1071 switch (args->param) {
1072 case I915_CONTEXT_PARAM_BAN_PERIOD:
84102171 1073 ret = -EINVAL;
c9dc0f35 1074 break;
b1b38278
DW
1075 case I915_CONTEXT_PARAM_NO_ZEROMAP:
1076 if (args->size) {
1077 ret = -EINVAL;
1078 } else {
1079 ctx->flags &= ~CONTEXT_NO_ZEROMAP;
1080 ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
bc3d6744
CW
1081 }
1082 break;
1083 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
6095868a 1084 if (args->size)
bc3d6744 1085 ret = -EINVAL;
6095868a
CW
1086 else if (args->value)
1087 i915_gem_context_set_no_error_capture(ctx);
1088 else
1089 i915_gem_context_clear_no_error_capture(ctx);
b1b38278 1090 break;
84102171
MK
1091 case I915_CONTEXT_PARAM_BANNABLE:
1092 if (args->size)
1093 ret = -EINVAL;
1094 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1095 ret = -EPERM;
6095868a
CW
1096 else if (args->value)
1097 i915_gem_context_set_bannable(ctx);
84102171 1098 else
6095868a 1099 i915_gem_context_clear_bannable(ctx);
84102171 1100 break;
ac14fbd4
CW
1101
1102 case I915_CONTEXT_PARAM_PRIORITY:
1103 {
1104 int priority = args->value;
1105
1106 if (args->size)
1107 ret = -EINVAL;
1108 else if (!to_i915(dev)->engine[RCS]->schedule)
1109 ret = -ENODEV;
1110 else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1111 priority < I915_CONTEXT_MIN_USER_PRIORITY)
1112 ret = -EINVAL;
1113 else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1114 !capable(CAP_SYS_NICE))
1115 ret = -EPERM;
1116 else
1117 ctx->priority = priority;
1118 }
1119 break;
1120
c9dc0f35
CW
1121 default:
1122 ret = -EINVAL;
1123 break;
1124 }
1125 mutex_unlock(&dev->struct_mutex);
1126
1acfc104
CW
1127out:
1128 i915_gem_context_put(ctx);
c9dc0f35
CW
1129 return ret;
1130}
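
/*
 * Illustrative userspace sketch (not part of this driver): exercising the
 * GETPARAM/SETPARAM uAPI handled above.  Assumes an open DRM fd, a context
 * id obtained from DRM_IOCTL_I915_GEM_CONTEXT_CREATE and libdrm's
 * drmIoctl(); the helper name and include paths are illustrative, and the
 * block is guarded out of the build.
 */
#if 0
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_context_tune(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_PRIORITY,
		.value = 512, /* within the user min/max checked above */
	};

	/* Raising priority above the default needs CAP_SYS_NICE. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
		return -1;

	/* Read back how much GTT address space this context can use. */
	p.param = I915_CONTEXT_PARAM_GTT_SIZE;
	p.value = 0;
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
		return -1;

	return 0;
}
#endif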
d538704b
CW
1131
1132int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
1133 void *data, struct drm_file *file)
1134{
fac5e23e 1135 struct drm_i915_private *dev_priv = to_i915(dev);
d538704b 1136 struct drm_i915_reset_stats *args = data;
e2efd130 1137 struct i915_gem_context *ctx;
d538704b
CW
1138 int ret;
1139
1140 if (args->flags || args->pad)
1141 return -EINVAL;
1142
1acfc104
CW
1143 ret = -ENOENT;
1144 rcu_read_lock();
1145 ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
1146 if (!ctx)
1147 goto out;
d538704b 1148
1acfc104
CW
1149 /*
1150 * We opt for unserialised reads here. This may result in tearing
1151 * in the extremely unlikely event of a GPU hang on this context
1152 * as we are querying them. If we need that extra layer of protection,
1153 * we should wrap the hangstats with a seqlock.
1154 */
d538704b
CW
1155
1156 if (capable(CAP_SYS_ADMIN))
1157 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1158 else
1159 args->reset_count = 0;
1160
77b25a97
CW
1161 args->batch_active = atomic_read(&ctx->guilty_count);
1162 args->batch_pending = atomic_read(&ctx->active_count);
d538704b 1163
1acfc104
CW
1164 ret = 0;
1165out:
1166 rcu_read_unlock();
1167 return ret;
d538704b 1168}
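
/*
 * Illustrative userspace sketch (not part of this driver): querying the
 * per-context reset statistics returned by the ioctl above.  Assumes an
 * open DRM fd and libdrm's drmIoctl(); guarded out of the build.
 */
#if 0
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_query_resets(int fd, uint32_t ctx_id)
{
	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };

	if (drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
		return -1;

	/*
	 * batch_active counts hangs where this context was executing
	 * (guilty); batch_pending counts hangs where it only had work queued.
	 */
	return stats.batch_active || stats.batch_pending;
}
#endif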
0daf0113
CW
1169
1170#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1171#include "selftests/mock_context.c"
791ff39a 1172#include "selftests/i915_gem_context.c"
0daf0113 1173#endif