// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"

#include "mock_engine.h"
#include "selftests/mock_request.h"

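/*
 * Pin the timeline's HWSP map without touching real hardware: take the
 * object lock just long enough to map the status page, then track the
 * pin in software via tl->pin_count.
 */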
static int mock_timeline_pin(struct intel_timeline *tl)
{
	int err;

	if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj, NULL)))
		return -EBUSY;

	err = intel_timeline_pin_map(tl);
	i915_gem_object_unlock(tl->hwsp_ggtt->obj);
	if (err)
		return err;

	atomic_inc(&tl->pin_count);
	return 0;
}

static void mock_timeline_unpin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	atomic_dec(&tl->pin_count);
}

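/*
 * The mock ring still needs a real VMA so that requests have something
 * to pin; back it with an internal GEM object in the GGTT.
 */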
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

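/*
 * Create a software-only ring: the ringbuffer contents live in a plain
 * kzalloc'ed buffer immediately after struct intel_ring, while the VMA
 * exists only to satisfy the pinning interface.
 */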
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
	const unsigned long sz = PAGE_SIZE;
	struct intel_ring *ring;

	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
	if (!ring)
		return NULL;

	kref_init(&ring->ref);
	ring->size = sz;
	ring->effective_size = sz;
	ring->vaddr = (void *)(ring + 1);
	atomic_set(&ring->pin_count, 1);

	ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE);
	if (IS_ERR(ring->vma)) {
		kfree(ring);
		return NULL;
	}

	intel_ring_update_space(ring);

	return ring;
}

static void mock_ring_free(struct intel_ring *ring)
{
	i915_vma_put(ring->vma);

	kfree(ring);
}

static struct i915_request *first_request(struct mock_engine *engine)
{
	return list_first_entry_or_null(&engine->hw_queue,
					struct i915_request,
					mock.link);
}

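/* Dequeue the request, mark it complete and signal its breadcrumbs. */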
static void advance(struct i915_request *request)
{
	list_del_init(&request->mock.link);
	i915_request_mark_complete(request);
	GEM_BUG_ON(!i915_request_completed(request));

	intel_engine_signal_breadcrumbs(request->engine);
}

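/*
 * Timer callback emulating completion "interrupts": the expired timer
 * completes the head of the fake hw queue, then either completes any
 * following zero-delay requests or rearms itself for the next delay.
 */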
static void hw_delay_complete(struct timer_list *t)
{
	struct mock_engine *engine = from_timer(engine, t, hw_delay);
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->hw_lock, flags);

	/* Timer fired, first request is complete */
	request = first_request(engine);
	if (request)
		advance(request);

	/*
	 * Also immediately signal any subsequent 0-delay requests, but
	 * requeue the timer for the next delayed request.
	 */
	while ((request = first_request(engine))) {
		if (request->mock.delay) {
			mod_timer(&engine->hw_delay,
				  jiffies + request->mock.delay);
			break;
		}

		advance(request);
	}

	spin_unlock_irqrestore(&engine->hw_lock, flags);
}

static void mock_context_unpin(struct intel_context *ce)
{
}

static void mock_context_post_unpin(struct intel_context *ce)
{
	i915_vma_unpin(ce->ring->vma);
}

static void mock_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		mock_ring_free(ce->ring);
		mock_timeline_unpin(ce->timeline);
	}

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int mock_context_alloc(struct intel_context *ce)
{
	int err;

	ce->ring = mock_ring(ce->engine);
	if (!ce->ring)
		return -ENOMEM;

	ce->timeline = intel_timeline_create(ce->engine->gt);
	if (IS_ERR(ce->timeline)) {
		kfree(ce->engine);
		return PTR_ERR(ce->timeline);
	}

	err = mock_timeline_pin(ce->timeline);
	if (err) {
		intel_timeline_put(ce->timeline);
		ce->timeline = NULL;
		return err;
	}

	return 0;
}

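/* Pinning the mock context only requires the ring's VMA in the GGTT. */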
static int mock_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww, void **unused)
{
	return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
}

static int mock_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}

static void mock_context_reset(struct intel_context *ce)
{
}

static const struct intel_context_ops mock_context_ops = {
	.alloc = mock_context_alloc,

	.pre_pin = mock_context_pre_pin,
	.pin = mock_context_pin,
	.unpin = mock_context_unpin,
	.post_unpin = mock_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = mock_context_reset,
	.destroy = mock_context_destroy,
};

static int mock_request_alloc(struct i915_request *request)
{
	INIT_LIST_HEAD(&request->mock.link);
	request->mock.delay = 0;

	return 0;
}

static int mock_emit_flush(struct i915_request *request,
			   unsigned int flags)
{
	return 0;
}

static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
{
	return cs;
}

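/*
 * Submission puts the request onto the fake hw queue. The head of the
 * queue is completed either immediately (zero delay) or later from the
 * hw_delay timer, mimicking asynchronous completion by real hardware.
 */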
static void mock_submit_request(struct i915_request *request)
{
	struct mock_engine *engine =
		container_of(request->engine, typeof(*engine), base);
	unsigned long flags;

	i915_request_submit(request);

	spin_lock_irqsave(&engine->hw_lock, flags);
	list_add_tail(&request->mock.link, &engine->hw_queue);
	if (list_is_first(&request->mock.link, &engine->hw_queue)) {
		if (request->mock.delay)
			mod_timer(&engine->hw_delay,
				  jiffies + request->mock.delay);
		else
			advance(request);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);
}

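/* Track the active request on the engine's scheduler list. */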
static void mock_add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}

static void mock_remove_from_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */

	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->sched_engine->lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->sched_engine->lock);
		spin_lock(&engine->sched_engine->lock);
		locked = engine;
	}
	list_del_init(&rq->sched.link);
	spin_unlock_irq(&locked->sched_engine->lock);
}

static void mock_reset_prepare(struct intel_engine_cs *engine)
{
}

static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	GEM_BUG_ON(stalled);
}

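/*
 * On reset, everything in flight is cancelled: requests already on the
 * scheduler list are marked with -EIO, and anything still waiting on
 * the fake hw queue is flushed through submission so it can be retired.
 */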
static void mock_reset_cancel(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);
	struct i915_request *rq;
	unsigned long flags;

	del_timer_sync(&mock->hw_delay);

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(rq));
	intel_engine_signal_breadcrumbs(engine);

	/* Cancel and submit all pending requests. */
	list_for_each_entry(rq, &mock->hw_queue, mock.link) {
		if (i915_request_mark_eio(rq)) {
			__i915_request_submit(rq);
			i915_request_put(rq);
		}
	}
	INIT_LIST_HEAD(&mock->hw_queue);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void mock_reset_finish(struct intel_engine_cs *engine)
{
}

static void mock_engine_release(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);

	GEM_BUG_ON(timer_pending(&mock->hw_delay));

	i915_sched_engine_put(engine->sched_engine);
	intel_breadcrumbs_put(engine->breadcrumbs);

	intel_context_unpin(engine->kernel_context);
	intel_context_put(engine->kernel_context);

	intel_engine_fini_retire(engine);
}

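/*
 * Create the shell of a mock engine: enough state for requests to be
 * built and submitted, but no real hardware behind it. The trailing
 * page of the allocation doubles as the fake status page.
 */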
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
				    const char *name,
				    int id)
{
	struct mock_engine *engine;

	GEM_BUG_ON(id >= I915_NUM_ENGINES);
	GEM_BUG_ON(!to_gt(i915)->uncore);

	engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
	if (!engine)
		return NULL;

	/* minimal engine setup for requests */
	engine->base.i915 = i915;
	engine->base.gt = to_gt(i915);
	engine->base.uncore = to_gt(i915)->uncore;
	snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
	engine->base.id = id;
	engine->base.mask = BIT(id);
	engine->base.legacy_idx = INVALID_ENGINE;
	engine->base.instance = id;
	engine->base.status_page.addr = (void *)(engine + 1);

	engine->base.cops = &mock_context_ops;
	engine->base.request_alloc = mock_request_alloc;
	engine->base.emit_flush = mock_emit_flush;
	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
	engine->base.submit_request = mock_submit_request;
	engine->base.add_active_request = mock_add_to_engine;
	engine->base.remove_active_request = mock_remove_from_engine;

	engine->base.reset.prepare = mock_reset_prepare;
	engine->base.reset.rewind = mock_reset_rewind;
	engine->base.reset.cancel = mock_reset_cancel;
	engine->base.reset.finish = mock_reset_finish;

	engine->base.release = mock_engine_release;

	to_gt(i915)->engine[id] = &engine->base;
	to_gt(i915)->engine_class[0][id] = &engine->base;

	/* fake hw queue */
	spin_lock_init(&engine->hw_lock);
	timer_setup(&engine->hw_delay, hw_delay_complete, 0);
	INIT_LIST_HEAD(&engine->hw_queue);

	intel_engine_add_user(&engine->base);

	return &engine->base;
}

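/*
 * Second-stage init, performed once the rest of the mock GT is in
 * place: create the scheduler, breadcrumbs and the pinned kernel
 * context that owns the status page.
 */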
int mock_engine_init(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	INIT_LIST_HEAD(&engine->pinned_contexts_list);

	engine->sched_engine = i915_sched_engine_create(ENGINE_MOCK);
	if (!engine->sched_engine)
		return -ENOMEM;
	engine->sched_engine->private_data = engine;

	intel_engine_init_execlists(engine);
	intel_engine_init__pm(engine);
	intel_engine_init_retire(engine);

	engine->breadcrumbs = intel_breadcrumbs_create(NULL);
	if (!engine->breadcrumbs)
		goto err_schedule;

	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		goto err_breadcrumbs;

	/* We insist the kernel context is using the status_page */
	engine->status_page.vma = ce->timeline->hwsp_ggtt;

	engine->kernel_context = ce;
	return 0;

err_breadcrumbs:
	intel_breadcrumbs_put(engine->breadcrumbs);
err_schedule:
	i915_sched_engine_put(engine->sched_engine);
	return -ENOMEM;
}

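/* Cancel the completion timer and drain the fake hw queue. */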
void mock_engine_flush(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);
	struct i915_request *request, *rn;

	del_timer_sync(&mock->hw_delay);

	spin_lock_irq(&mock->hw_lock);
	list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
		advance(request);
	spin_unlock_irq(&mock->hw_lock);
}

void mock_engine_reset(struct intel_engine_cs *engine)
{
}