drm/i915: Allow sharing the idle-barrier from other kernel requests
drivers/gpu/drm/i915/gt/intel_context.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"

static struct i915_global_context {
        struct i915_global base;
        struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
        return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
        kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
                     struct intel_engine_cs *engine)
{
        struct intel_context *ce;

        ce = intel_context_alloc();
        if (!ce)
                return ERR_PTR(-ENOMEM);

        intel_context_init(ce, ctx, engine);
        return ce;
}

int __intel_context_do_pin(struct intel_context *ce)
{
        int err;

        if (mutex_lock_interruptible(&ce->pin_mutex))
                return -EINTR;

        if (likely(!atomic_read(&ce->pin_count))) {
                intel_wakeref_t wakeref;

                err = 0;
                with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
                        err = ce->ops->pin(ce);
                if (err)
                        goto err;

                GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
                          ce->engine->name, ce->ring->timeline->fence_context,
                          ce->ring->head, ce->ring->tail);

                i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */

                smp_mb__before_atomic(); /* flush pin before it is visible */
        }

        atomic_inc(&ce->pin_count);
        GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

        mutex_unlock(&ce->pin_mutex);
        return 0;

err:
        mutex_unlock(&ce->pin_mutex);
        return err;
}

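/*
 * Release a pin. If other pins remain, the atomic_add_unless() fast path
 * simply decrements pin_count; the final unpin unwinds the backend state
 * under pin_mutex.
 */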
void intel_context_unpin(struct intel_context *ce)
{
        if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
                return;

        /* We may be called from inside intel_context_pin() to evict another */
        intel_context_get(ce);
        mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

        if (likely(atomic_dec_and_test(&ce->pin_count))) {
                GEM_TRACE("%s context:%llx retire\n",
                          ce->engine->name, ce->ring->timeline->fence_context);

                ce->ops->unpin(ce);

                i915_gem_context_put(ce->gem_context);
                intel_context_active_release(ce);
        }

        mutex_unlock(&ce->pin_mutex);
        intel_context_put(ce);
}

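/*
 * Pin the context state into the GGTT, at or above the runtime pin bias,
 * and flag the backing object so the shrinker leaves it alone while pinned.
 */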
static int __context_pin_state(struct i915_vma *vma)
{
        u64 flags;
        int err;

        flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
        flags |= PIN_HIGH | PIN_GLOBAL;

        err = i915_vma_pin(vma, 0, 0, flags);
        if (err)
                return err;

        /*
         * And mark it as a globally pinned object to let the shrinker know
         * it cannot reclaim the object until we release it.
         */
        vma->obj->pin_global++;
        vma->obj->mm.dirty = true;

        return 0;
}

static void __context_unpin_state(struct i915_vma *vma)
{
        vma->obj->pin_global--;
        __i915_vma_unpin(vma);
}

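/*
 * Called when the last request using this context is retired: drop the
 * pins taken in __intel_context_active() and the self-reference.
 */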
static void __intel_context_retire(struct i915_active *active)
{
        struct intel_context *ce = container_of(active, typeof(*ce), active);

        GEM_TRACE("%s context:%llx retire\n",
                  ce->engine->name, ce->ring->timeline->fence_context);

        if (ce->state)
                __context_unpin_state(ce->state);

        intel_ring_unpin(ce->ring);
        intel_context_put(ce);
}

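/*
 * Called as the activity tracker transitions from idle to busy: take a
 * reference on the context and pin its ring, and its state if it has one.
 */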
static int __intel_context_active(struct i915_active *active)
{
        struct intel_context *ce = container_of(active, typeof(*ce), active);
        int err;

        intel_context_get(ce);

        err = intel_ring_pin(ce->ring);
        if (err)
                goto err_put;

        if (!ce->state)
                return 0;

        err = __context_pin_state(ce->state);
        if (err)
                goto err_ring;

        return 0;

err_ring:
        intel_ring_unpin(ce->ring);
err_put:
        intel_context_put(ce);
        return err;
}

int intel_context_active_acquire(struct intel_context *ce)
{
        int err;

        err = i915_active_acquire(&ce->active);
        if (err)
                return err;

        /* Preallocate tracking nodes */
        if (!i915_gem_context_is_kernel(ce->gem_context)) {
                err = i915_active_acquire_preallocate_barrier(&ce->active,
                                                              ce->engine);
                if (err) {
                        i915_active_release(&ce->active);
                        return err;
                }
        }

        return 0;
}

void intel_context_active_release(struct intel_context *ce)
{
        /* Nodes preallocated in intel_context_active() */
        i915_active_acquire_barrier(&ce->active);
        i915_active_release(&ce->active);
}

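/*
 * One-time setup of a freshly allocated intel_context: take a reference
 * on the GEM context's address space and wire up the engine's context
 * operations and the activity tracker.
 */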
void
intel_context_init(struct intel_context *ce,
                   struct i915_gem_context *ctx,
                   struct intel_engine_cs *engine)
{
        GEM_BUG_ON(!engine->cops);

        kref_init(&ce->ref);

        ce->gem_context = ctx;
        ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);

        ce->engine = engine;
        ce->ops = engine->cops;
        ce->sseu = engine->sseu;

        INIT_LIST_HEAD(&ce->signal_link);
        INIT_LIST_HEAD(&ce->signals);

        mutex_init(&ce->pin_mutex);

        i915_active_init(ctx->i915, &ce->active,
                         __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
        i915_vm_put(ce->vm);

        mutex_destroy(&ce->pin_mutex);
        i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
        kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
        kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
        .shrink = i915_global_context_shrink,
        .exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
        global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
        if (!global.slab_ce)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}

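/*
 * While the context has requests in flight, hold a wakeref on its engine
 * so the engine is not powered down beneath it.
 */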
void intel_context_enter_engine(struct intel_context *ce)
{
        intel_engine_pm_get(ce->engine);
}

void intel_context_exit_engine(struct intel_context *ce)
{
        intel_engine_pm_put(ce->engine);
}

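/*
 * Prepare a request from another timeline that will remotely modify this
 * context: serialise it against the context's current activity and keep
 * the context image pinned until the request is retired.
 */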
int intel_context_prepare_remote_request(struct intel_context *ce,
                                         struct i915_request *rq)
{
        struct intel_timeline *tl = ce->ring->timeline;
        int err;

        /* Only suitable for use in remotely modifying this context */
        GEM_BUG_ON(rq->hw_context == ce);

        if (rq->timeline != tl) { /* beware timeline sharing */
                err = mutex_lock_interruptible_nested(&tl->mutex,
                                                      SINGLE_DEPTH_NESTING);
                if (err)
                        return err;

                /* Queue this switch after current activity by this context. */
                err = i915_active_request_set(&tl->last_request, rq);
                if (err)
                        goto unlock;
        }
        lockdep_assert_held(&tl->mutex);

        /*
         * Guarantee that the context image and the timeline remain pinned
         * until the modifying request is retired, by setting the ce activity
         * tracker.
         *
         * But we only need to take one pin on their account; in other words,
         * we transfer the pinned ce object to the tracked active request.
         */
        GEM_BUG_ON(i915_active_is_idle(&ce->active));
        err = i915_active_ref(&ce->active, rq->fence.context, rq);

unlock:
        if (rq->timeline != tl)
                mutex_unlock(&tl->mutex);
        return err;
}

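/*
 * Convenience wrapper: pin the context just long enough to create a
 * request on it; the request itself then keeps the context active until
 * it is retired.
 */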
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
        struct i915_request *rq;
        int err;

        err = intel_context_pin(ce);
        if (unlikely(err))
                return ERR_PTR(err);

        rq = i915_request_create(ce);
        intel_context_unpin(ce);

        return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif