2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * Zhi Wang <zhi.a.wang@intel.com>
27 * Ping Gao <ping.a.gao@intel.com>
28 * Tina Zhang <tina.zhang@intel.com>
29 * Changbin Du <changbin.du@intel.com>
30 * Min He <min.he@intel.com>
31 * Bing Niu <bing.niu@intel.com>
32 * Zhenyu Wang <zhenyuw@linux.intel.com>
36 #include <linux/kthread.h>
41 #define RING_CTX_OFF(x) \
42 offsetof(struct execlist_ring_context, x)
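/* Write the guest PDP root pointer dwords into the PDP fields of the
 * execlist ring context. The ring context lays the PDPs out from
 * pdp3_UDW downwards, so the array is copied in reverse order.
 */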
44 static void set_context_pdp_root_pointer(
45 struct execlist_ring_context *ring_context,
48 struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
51 for (i = 0; i < 8; i++)
52 pdp_pair[i].val = pdp[7 - i];
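/* Point the PDP root pointers in the shadow ring context at the shadow
 * PPGTT page tables of this workload.
 */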
55 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
57 struct intel_vgpu *vgpu = workload->vgpu;
58 int ring_id = workload->ring_id;
59 struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
60 struct drm_i915_gem_object *ctx_obj =
61 shadow_ctx->__engine[ring_id].state->obj;
62 struct execlist_ring_context *shadow_ring_context;
65 if (WARN_ON(!workload->shadow_mm))
68 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
71 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
72 shadow_ring_context = kmap(page);
73 set_context_pdp_root_pointer(shadow_ring_context,
74 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
79 * when populating the shadow ctx from the guest, we should not override the
80 * OA related registers, so that they will not be clobbered by guest OA configs.
81 * This makes it possible to capture OA data from the host for both host and guests.
83 static void sr_oa_regs(struct intel_vgpu_workload *workload,
84 u32 *reg_state, bool save)
86 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
87 u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
88 u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
91 i915_mmio_reg_offset(EU_PERF_CNTL0),
92 i915_mmio_reg_offset(EU_PERF_CNTL1),
93 i915_mmio_reg_offset(EU_PERF_CNTL2),
94 i915_mmio_reg_offset(EU_PERF_CNTL3),
95 i915_mmio_reg_offset(EU_PERF_CNTL4),
96 i915_mmio_reg_offset(EU_PERF_CNTL5),
97 i915_mmio_reg_offset(EU_PERF_CNTL6),
100 if (workload->ring_id != RCS)
104 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
106 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
107 u32 state_offset = ctx_flexeu0 + i * 2;
109 workload->flex_mmio[i] = reg_state[state_offset + 1];
112 reg_state[ctx_oactxctrl] =
113 i915_mmio_reg_offset(GEN8_OACTXCONTROL);
114 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
116 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
117 u32 state_offset = ctx_flexeu0 + i * 2;
118 u32 mmio = flex_mmio[i];
120 reg_state[state_offset] = mmio;
121 reg_state[state_offset + 1] = workload->flex_mmio[i];
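/* Copy the guest ring context pages from guest memory into the shadow
 * context image, preserving the host OA register state via sr_oa_regs().
 */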
126 static int populate_shadow_context(struct intel_vgpu_workload *workload)
128 struct intel_vgpu *vgpu = workload->vgpu;
129 struct intel_gvt *gvt = vgpu->gvt;
130 int ring_id = workload->ring_id;
131 struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
132 struct drm_i915_gem_object *ctx_obj =
133 shadow_ctx->__engine[ring_id].state->obj;
134 struct execlist_ring_context *shadow_ring_context;
137 unsigned long context_gpa, context_page_num;
140 gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
141 workload->ctx_desc.lrca);
143 context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
145 context_page_num = context_page_num >> PAGE_SHIFT;
147 if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
148 context_page_num = 19;
152 while (i < context_page_num) {
153 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
154 (u32)((workload->ctx_desc.lrca + i) <<
155 I915_GTT_PAGE_SHIFT));
156 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
157 gvt_vgpu_err("Invalid guest context descriptor\n");
161 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
163 intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
169 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
170 shadow_ring_context = kmap(page);
172 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
173 #define COPY_REG(name) \
174 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
175 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
176 #define COPY_REG_MASKED(name) {\
177 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
178 + RING_CTX_OFF(name.val),\
179 &shadow_ring_context->name.val, 4);\
180 shadow_ring_context->name.val |= 0xffff << 16;\
183 COPY_REG_MASKED(ctx_ctrl);
184 COPY_REG(ctx_timestamp);
186 if (ring_id == RCS) {
187 COPY_REG(bb_per_ctx_ptr);
188 COPY_REG(rcs_indirect_ctx);
189 COPY_REG(rcs_indirect_ctx_offset);
192 #undef COPY_REG_MASKED
194 intel_gvt_hypervisor_read_gpa(vgpu,
195 workload->ring_context_gpa +
196 sizeof(*shadow_ring_context),
197 (void *)shadow_ring_context +
198 sizeof(*shadow_ring_context),
199 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
201 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
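/* Requests submitted by GVT itself use a force-single-submission context. */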
206 static inline bool is_gvt_request(struct i915_request *req)
208 return i915_gem_context_force_single_submission(req->ctx);
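/* Snapshot INSTDONE and ACTHD of the ring into the vGPU's virtual
 * MMIO registers.
 */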
211 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
213 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
214 u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
217 reg = RING_INSTDONE(ring_base);
218 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
219 reg = RING_ACTHD(ring_base);
220 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
221 reg = RING_ACTHD_UDW(ring_base);
222 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
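/* Context status notifier: switch the engine MMIO state between host and
 * vGPU ownership on schedule-in, and save ring hardware state on
 * schedule-out and preemption.
 */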
225 static int shadow_context_status_change(struct notifier_block *nb,
226 unsigned long action, void *data)
228 struct i915_request *req = data;
229 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
230 shadow_ctx_notifier_block[req->engine->id]);
231 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
232 enum intel_engine_id ring_id = req->engine->id;
233 struct intel_vgpu_workload *workload;
236 if (!is_gvt_request(req)) {
237 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
238 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
239 scheduler->engine_owner[ring_id]) {
240 /* Switch ring from vGPU to host. */
241 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
243 scheduler->engine_owner[ring_id] = NULL;
245 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
250 workload = scheduler->current_workload[ring_id];
251 if (unlikely(!workload))
255 case INTEL_CONTEXT_SCHEDULE_IN:
256 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
257 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
258 /* Switch ring from host to vGPU or vGPU to vGPU. */
259 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
260 workload->vgpu, ring_id);
261 scheduler->engine_owner[ring_id] = workload->vgpu;
263 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
264 ring_id, workload->vgpu->id);
265 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
266 atomic_set(&workload->shadow_ctx_active, 1);
268 case INTEL_CONTEXT_SCHEDULE_OUT:
269 save_ring_hw_state(workload->vgpu, ring_id);
270 atomic_set(&workload->shadow_ctx_active, 0);
272 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
273 save_ring_hw_state(workload->vgpu, ring_id);
279 wake_up(&workload->shadow_ctx_status_wq);
283 static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
284 struct intel_engine_cs *engine)
286 struct intel_context *ce = to_intel_context(ctx, engine);
291 /* Update bits 0-11 of the context descriptor, which include flags
292 * like GEN8_CTX_* cached in desc_template.
294 desc &= U64_MAX << 12;
295 desc |= ctx->desc_template & ((1ULL << 12) - 1);
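/* Reserve space on the shadow request's ring and copy the scanned guest
 * ring buffer contents into it.
 */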
300 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
302 struct intel_vgpu *vgpu = workload->vgpu;
303 void *shadow_ring_buffer_va;
305 struct i915_request *req = workload->req;
307 if (IS_KABYLAKE(req->i915) &&
308 is_inhibit_context(req->ctx, req->engine->id))
309 intel_vgpu_restore_inhibit_context(vgpu, req);
311 /* allocate shadow ring buffer */
312 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
314 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
319 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
321 /* get shadow ring buffer va */
322 workload->shadow_ring_buffer_va = cs;
324 memcpy(cs, shadow_ring_buffer_va,
327 cs += workload->rb_len / sizeof(u32);
328 intel_ring_advance(workload->req, cs);
333 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
335 if (!wa_ctx->indirect_ctx.obj)
338 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
339 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
343 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
344 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
345 * @workload: an abstract entity for each execlist submission.
347 * This function is called before the workload is submitted to i915, to make
348 * sure the content of the workload is valid.
350 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
352 struct intel_vgpu *vgpu = workload->vgpu;
353 struct intel_vgpu_submission *s = &vgpu->submission;
354 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
355 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
356 int ring_id = workload->ring_id;
357 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
358 struct intel_ring *ring;
361 lockdep_assert_held(&dev_priv->drm.struct_mutex);
363 if (workload->shadowed)
366 shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
367 shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
368 GEN8_CTX_ADDRESSING_MODE_SHIFT;
370 if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
371 shadow_context_descriptor_update(shadow_ctx,
372 dev_priv->engine[ring_id]);
374 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
378 if ((workload->ring_id == RCS) &&
379 (workload->wa_ctx.indirect_ctx.size != 0)) {
380 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
385 /* Pin the shadow context in GVT even though it will also be pinned when
386 * i915 allocates a request. This is because GVT updates the guest context
387 * from the shadow context when the workload completes, and at that moment
388 * i915 may already have unpinned the shadow context, making the shadow_ctx
389 * pages invalid. So GVT needs to pin it itself. After updating the guest
390 * context, GVT can unpin the shadow_ctx safely.
392 ring = intel_context_pin(shadow_ctx, engine);
395 gvt_vgpu_err("fail to pin shadow context\n");
399 ret = populate_shadow_context(workload);
402 workload->shadowed = true;
406 intel_context_unpin(shadow_ctx, engine);
408 release_shadow_wa_ctx(&workload->wa_ctx);
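/* Allocate an i915 request on the shadow context and copy the workload's
 * ring buffer contents into it.
 */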
413 static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
415 int ring_id = workload->ring_id;
416 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
417 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
418 struct i915_request *rq;
419 struct intel_vgpu *vgpu = workload->vgpu;
420 struct intel_vgpu_submission *s = &vgpu->submission;
421 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
424 rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
426 gvt_vgpu_err("fail to allocate gem request\n");
431 gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
433 workload->req = i915_request_get(rq);
434 ret = copy_workload_to_ring_buffer(workload);
440 intel_context_unpin(shadow_ctx, engine);
441 release_shadow_wa_ctx(&workload->wa_ctx);
445 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
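/* Pin the shadow copy of each privileged batch buffer in the GGTT and
 * relocate its MI_BATCH_BUFFER_START command to the shadow address;
 * non-privileged (ppgtt) batch buffers keep the original guest address.
 */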
447 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
449 struct intel_gvt *gvt = workload->vgpu->gvt;
450 const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
451 struct intel_vgpu_shadow_bb *bb;
454 list_for_each_entry(bb, &workload->shadow_bb, list) {
455 /* For a privileged batch buffer that is not a wa_ctx, bb_start_cmd_va
456 * is only updated in ring_scan_buffer, not at the real ring address
457 * allocated later in copy_workload_to_ring_buffer. Please note that
458 * shadow_ring_buffer_va now points to the real ring buffer va
459 * in copy_workload_to_ring_buffer.
463 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
467 /* For a non-privileged bb, scan & shadow is only for
468 * debugging purposes, so the content of the shadow bb
469 * is the same as the original bb. Therefore, rather
470 * than switching to the shadow bb's gma address, we
471 * directly use the original batch buffer's gma address
472 * and send the original bb to hardware directly.
475 if (bb->clflush & CLFLUSH_AFTER) {
476 drm_clflush_virt_range(bb->va,
478 bb->clflush &= ~CLFLUSH_AFTER;
480 i915_gem_obj_finish_shmem_access(bb->obj);
481 bb->accessing = false;
484 bb->vma = i915_gem_object_ggtt_pin(bb->obj,
486 if (IS_ERR(bb->vma)) {
487 ret = PTR_ERR(bb->vma);
491 /* relocate shadow batch buffer */
492 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
493 if (gmadr_bytes == 8)
494 bb->bb_start_cmd_va[2] = 0;
496 /* No one is going to touch shadow bb from now on. */
497 if (bb->clflush & CLFLUSH_AFTER) {
498 drm_clflush_virt_range(bb->va,
500 bb->clflush &= ~CLFLUSH_AFTER;
503 ret = i915_gem_object_set_to_gtt_domain(bb->obj,
508 i915_gem_obj_finish_shmem_access(bb->obj);
509 bb->accessing = false;
511 i915_vma_move_to_active(bb->vma, workload->req, 0);
516 release_shadow_batch_buffer(workload);
520 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
522 struct intel_vgpu_workload *workload = container_of(wa_ctx,
523 struct intel_vgpu_workload,
525 int ring_id = workload->ring_id;
526 struct intel_vgpu_submission *s = &workload->vgpu->submission;
527 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
528 struct drm_i915_gem_object *ctx_obj =
529 shadow_ctx->__engine[ring_id].state->obj;
530 struct execlist_ring_context *shadow_ring_context;
533 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
534 shadow_ring_context = kmap_atomic(page);
536 shadow_ring_context->bb_per_ctx_ptr.val =
537 (shadow_ring_context->bb_per_ctx_ptr.val &
538 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
539 shadow_ring_context->rcs_indirect_ctx.val =
540 (shadow_ring_context->rcs_indirect_ctx.val &
541 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
543 kunmap_atomic(shadow_ring_context);
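/* Pin the shadow indirect context in the GGTT and update the per-ctx and
 * indirect-ctx pointers in the shadow ring context accordingly.
 */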
547 static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
549 struct i915_vma *vma;
550 unsigned char *per_ctx_va =
551 (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
552 wa_ctx->indirect_ctx.size;
554 if (wa_ctx->indirect_ctx.size == 0)
557 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
558 0, CACHELINE_BYTES, 0);
562 /* FIXME: we are not tracking our pinned VMA leaving it
563 * up to the core to fix up the stray pin_count upon free.
567 wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
569 wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
570 memset(per_ctx_va, 0, CACHELINE_BYTES);
572 update_wa_ctx_2_shadow_ctx(wa_ctx);
576 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
578 struct intel_vgpu *vgpu = workload->vgpu;
579 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
580 struct intel_vgpu_shadow_bb *bb, *pos;
582 if (list_empty(&workload->shadow_bb))
585 bb = list_first_entry(&workload->shadow_bb,
586 struct intel_vgpu_shadow_bb, list);
588 mutex_lock(&dev_priv->drm.struct_mutex);
590 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
593 i915_gem_obj_finish_shmem_access(bb->obj);
595 if (bb->va && !IS_ERR(bb->va))
596 i915_gem_object_unpin_map(bb->obj);
598 if (bb->vma && !IS_ERR(bb->vma)) {
599 i915_vma_unpin(bb->vma);
600 i915_vma_close(bb->vma);
602 __i915_gem_object_release_unless_active(bb->obj);
608 mutex_unlock(&dev_priv->drm.struct_mutex);
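/* Do everything a workload needs before dispatch: pin the shadow mm,
 * sync OOS pages, flush post-shadow pages, generate the i915 request and
 * prepare the shadow batch buffers and wa context.
 */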
611 static int prepare_workload(struct intel_vgpu_workload *workload)
613 struct intel_vgpu *vgpu = workload->vgpu;
616 ret = intel_vgpu_pin_mm(workload->shadow_mm);
618 gvt_vgpu_err("fail to vgpu pin mm\n");
622 update_shadow_pdps(workload);
624 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
626 gvt_vgpu_err("fail to vgpu sync oos pages\n");
630 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
632 gvt_vgpu_err("fail to flush post shadow\n");
636 ret = intel_gvt_generate_request(workload);
638 gvt_vgpu_err("fail to generate request\n");
642 ret = prepare_shadow_batch_buffer(workload);
644 gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
648 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
650 gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
651 goto err_shadow_batch;
654 if (workload->prepare) {
655 ret = workload->prepare(workload);
657 goto err_shadow_wa_ctx;
662 release_shadow_wa_ctx(&workload->wa_ctx);
664 release_shadow_batch_buffer(workload);
666 intel_vgpu_unpin_mm(workload->shadow_mm);
670 static int dispatch_workload(struct intel_vgpu_workload *workload)
672 struct intel_vgpu *vgpu = workload->vgpu;
673 struct intel_vgpu_submission *s = &vgpu->submission;
674 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
675 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
676 int ring_id = workload->ring_id;
677 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
680 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
683 mutex_lock(&dev_priv->drm.struct_mutex);
685 ret = intel_gvt_scan_and_shadow_workload(workload);
689 ret = prepare_workload(workload);
691 intel_context_unpin(shadow_ctx, engine);
697 workload->status = ret;
699 if (!IS_ERR_OR_NULL(workload->req)) {
700 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
701 ring_id, workload->req);
702 i915_request_add(workload->req);
703 workload->dispatched = true;
706 mutex_unlock(&dev_priv->drm.struct_mutex);
710 static struct intel_vgpu_workload *pick_next_workload(
711 struct intel_gvt *gvt, int ring_id)
713 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
714 struct intel_vgpu_workload *workload = NULL;
716 mutex_lock(&gvt->lock);
719 * no current vgpu / will be scheduled out / no workload
722 if (!scheduler->current_vgpu) {
723 gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
727 if (scheduler->need_reschedule) {
728 gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
732 if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
736 * still have a current workload; maybe the workload dispatcher
737 * failed to submit it for some reason, so resubmit it.
739 if (scheduler->current_workload[ring_id]) {
740 workload = scheduler->current_workload[ring_id];
741 gvt_dbg_sched("ring id %d still have current workload %p\n",
747 * pick a workload as the current workload;
748 * once the current workload is set, the scheduling policy routines
749 * will wait until the current workload is finished when trying to
750 * schedule out a vgpu.
752 scheduler->current_workload[ring_id] = container_of(
753 workload_q_head(scheduler->current_vgpu, ring_id)->next,
754 struct intel_vgpu_workload, list);
756 workload = scheduler->current_workload[ring_id];
758 gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
760 atomic_inc(&workload->vgpu->submission.running_workload_num);
762 mutex_unlock(&gvt->lock);
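/* Write the shadow context image back into the guest context in guest
 * memory after the workload has completed.
 */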
766 static void update_guest_context(struct intel_vgpu_workload *workload)
768 struct intel_vgpu *vgpu = workload->vgpu;
769 struct intel_gvt *gvt = vgpu->gvt;
770 struct intel_vgpu_submission *s = &vgpu->submission;
771 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
772 int ring_id = workload->ring_id;
773 struct drm_i915_gem_object *ctx_obj =
774 shadow_ctx->__engine[ring_id].state->obj;
775 struct execlist_ring_context *shadow_ring_context;
778 unsigned long context_gpa, context_page_num;
781 gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
782 workload->ctx_desc.lrca);
784 context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
786 context_page_num = context_page_num >> PAGE_SHIFT;
788 if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
789 context_page_num = 19;
793 while (i < context_page_num) {
794 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
795 (u32)((workload->ctx_desc.lrca + i) <<
796 I915_GTT_PAGE_SHIFT));
797 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
798 gvt_vgpu_err("invalid guest context descriptor\n");
802 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
804 intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
810 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
811 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
813 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
814 shadow_ring_context = kmap(page);
816 #define COPY_REG(name) \
817 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
818 RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
821 COPY_REG(ctx_timestamp);
825 intel_gvt_hypervisor_write_gpa(vgpu,
826 workload->ring_context_gpa +
827 sizeof(*shadow_ring_context),
828 (void *)shadow_ring_context +
829 sizeof(*shadow_ring_context),
830 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
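/* Destroy all workloads still queued (not yet dispatched) on the engines
 * in engine_mask.
 */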
835 static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
837 struct intel_vgpu_submission *s = &vgpu->submission;
838 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
839 struct intel_engine_cs *engine;
840 struct intel_vgpu_workload *pos, *n;
843 /* free the unsubmitted workloads in the queues. */
844 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
845 list_for_each_entry_safe(pos, n,
846 &s->workload_q_head[engine->id], list) {
847 list_del_init(&pos->list);
848 intel_vgpu_destroy_workload(pos);
850 clear_bit(engine->id, s->shadow_ctx_desc_updated);
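/* Finish the current workload on a ring: wait for the context switch,
 * propagate the status, copy the context back to the guest and release
 * the shadow resources.
 */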
854 static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
856 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
857 struct intel_vgpu_workload *workload =
858 scheduler->current_workload[ring_id];
859 struct intel_vgpu *vgpu = workload->vgpu;
860 struct intel_vgpu_submission *s = &vgpu->submission;
863 mutex_lock(&gvt->lock);
865 /* For a workload with a request, we need to wait for the context
866 * switch to make sure the request is completed.
867 * For a workload without a request, complete the workload directly.
870 struct drm_i915_private *dev_priv =
871 workload->vgpu->gvt->dev_priv;
872 struct intel_engine_cs *engine =
873 dev_priv->engine[workload->ring_id];
874 wait_event(workload->shadow_ctx_status_wq,
875 !atomic_read(&workload->shadow_ctx_active));
877 /* If this request caused a GPU hang, req->fence.error will
878 * be set to -EIO. Use -EIO as the workload status so
879 * that, when this request caused a GPU hang, we do not trigger
880 * a context switch interrupt to the guest.
882 if (likely(workload->status == -EINPROGRESS)) {
883 if (workload->req->fence.error == -EIO)
884 workload->status = -EIO;
886 workload->status = 0;
889 i915_request_put(fetch_and_zero(&workload->req));
891 if (!workload->status && !(vgpu->resetting_eng &
892 ENGINE_MASK(ring_id))) {
893 update_guest_context(workload);
895 for_each_set_bit(event, workload->pending_events,
897 intel_vgpu_trigger_virtual_event(vgpu, event);
899 mutex_lock(&dev_priv->drm.struct_mutex);
900 /* unpin shadow ctx as the shadow_ctx update is done */
901 intel_context_unpin(s->shadow_ctx, engine);
902 mutex_unlock(&dev_priv->drm.struct_mutex);
905 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
906 ring_id, workload, workload->status);
908 scheduler->current_workload[ring_id] = NULL;
910 list_del_init(&workload->list);
912 if (!workload->status) {
913 release_shadow_batch_buffer(workload);
914 release_shadow_wa_ctx(&workload->wa_ctx);
917 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
918 /* An unsuccessful workload->status means the HW GPU
919 * hit a hang or something went wrong with i915/GVT,
920 * and GVT won't inject a context switch interrupt to the guest.
921 * So to the guest this error is actually a vGPU hang.
922 * Accordingly, we should emulate a vGPU hang. If
923 * there are pending workloads which were already submitted
924 * from the guest, we should clean them up like the HW GPU does.
926 * If it is in the middle of an engine reset, the pending
927 * workloads won't be submitted to the HW GPU and will be
928 * cleaned up during the reset process later, so doing
929 * the workload cleanup here doesn't have any impact.
931 clean_workloads(vgpu, ENGINE_MASK(ring_id));
934 workload->complete(workload);
936 atomic_dec(&s->running_workload_num);
937 wake_up(&scheduler->workload_complete_wq);
939 if (gvt->scheduler.need_reschedule)
940 intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
942 mutex_unlock(&gvt->lock);
945 struct workload_thread_param {
946 struct intel_gvt *gvt;
950 static int workload_thread(void *priv)
952 struct workload_thread_param *p = (struct workload_thread_param *)priv;
953 struct intel_gvt *gvt = p->gvt;
954 int ring_id = p->ring_id;
955 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
956 struct intel_vgpu_workload *workload = NULL;
957 struct intel_vgpu *vgpu = NULL;
959 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
960 || IS_KABYLAKE(gvt->dev_priv);
961 DEFINE_WAIT_FUNC(wait, woken_wake_function);
965 gvt_dbg_core("workload thread for ring %d started\n", ring_id);
967 while (!kthread_should_stop()) {
968 add_wait_queue(&scheduler->waitq[ring_id], &wait);
970 workload = pick_next_workload(gvt, ring_id);
973 wait_woken(&wait, TASK_INTERRUPTIBLE,
974 MAX_SCHEDULE_TIMEOUT);
975 } while (!kthread_should_stop());
976 remove_wait_queue(&scheduler->waitq[ring_id], &wait);
981 gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
982 workload->ring_id, workload,
985 intel_runtime_pm_get(gvt->dev_priv);
987 gvt_dbg_sched("ring id %d will dispatch workload %p\n",
988 workload->ring_id, workload);
991 intel_uncore_forcewake_get(gvt->dev_priv,
994 mutex_lock(&gvt->lock);
995 ret = dispatch_workload(workload);
996 mutex_unlock(&gvt->lock);
999 vgpu = workload->vgpu;
1000 gvt_vgpu_err("fail to dispatch workload, skip\n");
1004 gvt_dbg_sched("ring id %d wait workload %p\n",
1005 workload->ring_id, workload);
1006 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
1009 gvt_dbg_sched("will complete workload %p, status: %d\n",
1010 workload, workload->status);
1012 complete_current_workload(gvt, ring_id);
1014 if (need_force_wake)
1015 intel_uncore_forcewake_put(gvt->dev_priv,
1018 intel_runtime_pm_put(gvt->dev_priv);
1019 if (ret && (vgpu_is_vm_unhealthy(ret)))
1020 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1025 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
1027 struct intel_vgpu_submission *s = &vgpu->submission;
1028 struct intel_gvt *gvt = vgpu->gvt;
1029 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1031 if (atomic_read(&s->running_workload_num)) {
1032 gvt_dbg_sched("wait vgpu idle\n");
1034 wait_event(scheduler->workload_complete_wq,
1035 !atomic_read(&s->running_workload_num));
1039 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
1041 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1042 struct intel_engine_cs *engine;
1043 enum intel_engine_id i;
1045 gvt_dbg_core("clean workload scheduler\n");
1047 for_each_engine(engine, gvt->dev_priv, i) {
1048 atomic_notifier_chain_unregister(
1049 &engine->context_status_notifier,
1050 &gvt->shadow_ctx_notifier_block[i]);
1051 kthread_stop(scheduler->thread[i]);
1055 int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
1057 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1058 struct workload_thread_param *param = NULL;
1059 struct intel_engine_cs *engine;
1060 enum intel_engine_id i;
1063 gvt_dbg_core("init workload scheduler\n");
1065 init_waitqueue_head(&scheduler->workload_complete_wq);
1067 for_each_engine(engine, gvt->dev_priv, i) {
1068 init_waitqueue_head(&scheduler->waitq[i]);
1070 param = kzalloc(sizeof(*param), GFP_KERNEL);
1079 scheduler->thread[i] = kthread_run(workload_thread, param,
1080 "gvt workload %d", i);
1081 if (IS_ERR(scheduler->thread[i])) {
1082 gvt_err("fail to create workload thread\n");
1083 ret = PTR_ERR(scheduler->thread[i]);
1087 gvt->shadow_ctx_notifier_block[i].notifier_call =
1088 shadow_context_status_change;
1089 atomic_notifier_chain_register(&engine->context_status_notifier,
1090 &gvt->shadow_ctx_notifier_block[i]);
1094 intel_gvt_clean_workload_scheduler(gvt);
1101 * intel_vgpu_clean_submission - free submission-related resource for vGPU
1104 * This function is called when a vGPU is being destroyed.
1107 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
1109 struct intel_vgpu_submission *s = &vgpu->submission;
1111 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
1112 i915_gem_context_put(s->shadow_ctx);
1113 kmem_cache_destroy(s->workloads);
1118 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
1120 * @engine_mask: engines expected to be reset
1122 * This function is called when a vGPU is being reset.
1125 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
1126 unsigned long engine_mask)
1128 struct intel_vgpu_submission *s = &vgpu->submission;
1133 clean_workloads(vgpu, engine_mask);
1134 s->ops->reset(vgpu, engine_mask);
1138 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
1141 * This function is called when a vGPU is being created.
1144 * Zero on success, negative error code if failed.
1147 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
1149 struct intel_vgpu_submission *s = &vgpu->submission;
1150 enum intel_engine_id i;
1151 struct intel_engine_cs *engine;
1154 s->shadow_ctx = i915_gem_context_create_gvt(
1155 &vgpu->gvt->dev_priv->drm);
1156 if (IS_ERR(s->shadow_ctx))
1157 return PTR_ERR(s->shadow_ctx);
1159 if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
1160 s->shadow_ctx->sched.priority = INT_MAX;
1162 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
1164 s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
1165 sizeof(struct intel_vgpu_workload), 0,
1167 offsetof(struct intel_vgpu_workload, rb_tail),
1168 sizeof_field(struct intel_vgpu_workload, rb_tail),
1171 if (!s->workloads) {
1173 goto out_shadow_ctx;
1176 for_each_engine(engine, vgpu->gvt->dev_priv, i)
1177 INIT_LIST_HEAD(&s->workload_q_head[i]);
1179 atomic_set(&s->running_workload_num, 0);
1180 bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
1185 i915_gem_context_put(s->shadow_ctx);
1190 * intel_vgpu_select_submission_ops - select virtual submission interface
1192 * @interface: expected vGPU virtual submission interface
1194 * This function is called when guest configures submission interface.
1197 * Zero on success, negative error code if failed.
1200 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
1201 unsigned long engine_mask,
1202 unsigned int interface)
1204 struct intel_vgpu_submission *s = &vgpu->submission;
1205 const struct intel_vgpu_submission_ops *ops[] = {
1206 [INTEL_VGPU_EXECLIST_SUBMISSION] =
1207 &intel_vgpu_execlist_submission_ops,
1211 if (WARN_ON(interface >= ARRAY_SIZE(ops)))
1214 if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
1218 s->ops->clean(vgpu, engine_mask);
1220 if (interface == 0) {
1222 s->virtual_submission_interface = 0;
1224 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
1228 ret = ops[interface]->init(vgpu, engine_mask);
1232 s->ops = ops[interface];
1233 s->virtual_submission_interface = interface;
1236 gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
1237 vgpu->id, s->ops->name);
1243 * intel_vgpu_destroy_workload - destroy a vGPU workload
1246 * This function is called when destroying a vGPU workload.
1249 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1251 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1253 if (workload->shadow_mm)
1254 intel_vgpu_mm_put(workload->shadow_mm);
1256 kmem_cache_free(s->workloads, workload);
1259 static struct intel_vgpu_workload *
1260 alloc_workload(struct intel_vgpu *vgpu)
1262 struct intel_vgpu_submission *s = &vgpu->submission;
1263 struct intel_vgpu_workload *workload;
1265 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1267 return ERR_PTR(-ENOMEM);
1269 INIT_LIST_HEAD(&workload->list);
1270 INIT_LIST_HEAD(&workload->shadow_bb);
1272 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1273 atomic_set(&workload->shadow_ctx_active, 0);
1275 workload->status = -EINPROGRESS;
1276 workload->shadowed = false;
1277 workload->vgpu = vgpu;
1282 #define RING_CTX_OFF(x) \
1283 offsetof(struct execlist_ring_context, x)
1285 static void read_guest_pdps(struct intel_vgpu *vgpu,
1286 u64 ring_context_gpa, u32 pdp[8])
1291 gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
1293 for (i = 0; i < 8; i++)
1294 intel_gvt_hypervisor_read_gpa(vgpu,
1295 gpa + i * 8, &pdp[7 - i], 4);
1298 static int prepare_mm(struct intel_vgpu_workload *workload)
1300 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1301 struct intel_vgpu_mm *mm;
1302 struct intel_vgpu *vgpu = workload->vgpu;
1303 intel_gvt_gtt_type_t root_entry_type;
1304 u64 pdps[GVT_RING_CTX_NR_PDPS];
1306 switch (desc->addressing_mode) {
1307 case 1: /* legacy 32-bit */
1308 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1310 case 3: /* legacy 64-bit */
1311 root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1314 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
1318 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
1320 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1324 workload->shadow_mm = mm;
1328 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
1329 ((a)->lrca == (b)->lrca))
1331 #define get_last_workload(q) \
1332 (list_empty(q) ? NULL : container_of(q->prev, \
1333 struct intel_vgpu_workload, list))
1335 * intel_vgpu_create_workload - create a vGPU workload
1337 * @desc: a guest context descriptor
1339 * This function is called when creating a vGPU workload.
1342 * struct intel_vgpu_workload * on success, negative error code in
1343 * pointer if failed.
1346 struct intel_vgpu_workload *
1347 intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1348 struct execlist_ctx_descriptor_format *desc)
1350 struct intel_vgpu_submission *s = &vgpu->submission;
1351 struct list_head *q = workload_q_head(vgpu, ring_id);
1352 struct intel_vgpu_workload *last_workload = get_last_workload(q);
1353 struct intel_vgpu_workload *workload = NULL;
1354 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1355 u64 ring_context_gpa;
1356 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
1359 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
1360 (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
1361 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
1362 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
1363 return ERR_PTR(-EINVAL);
1366 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1367 RING_CTX_OFF(ring_header.val), &head, 4);
1369 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1370 RING_CTX_OFF(ring_tail.val), &tail, 4);
1372 head &= RB_HEAD_OFF_MASK;
1373 tail &= RB_TAIL_OFF_MASK;
1375 if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
1376 gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
1377 gvt_dbg_el("ctx head %x real head %lx\n", head,
1378 last_workload->rb_tail);
1380 * we cannot use the guest context head pointer here,
1381 * as it might not be updated at this time
1383 head = last_workload->rb_tail;
1386 gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
1388 /* record some ring buffer register values for scan and shadow */
1389 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1390 RING_CTX_OFF(rb_start.val), &start, 4);
1391 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1392 RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
1393 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1394 RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
1396 workload = alloc_workload(vgpu);
1397 if (IS_ERR(workload))
1400 workload->ring_id = ring_id;
1401 workload->ctx_desc = *desc;
1402 workload->ring_context_gpa = ring_context_gpa;
1403 workload->rb_head = head;
1404 workload->rb_tail = tail;
1405 workload->rb_start = start;
1406 workload->rb_ctl = ctl;
1408 if (ring_id == RCS) {
1409 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1410 RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
1411 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1412 RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
1414 workload->wa_ctx.indirect_ctx.guest_gma =
1415 indirect_ctx & INDIRECT_CTX_ADDR_MASK;
1416 workload->wa_ctx.indirect_ctx.size =
1417 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
1419 workload->wa_ctx.per_ctx.guest_gma =
1420 per_ctx & PER_CTX_ADDR_MASK;
1421 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1424 gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
1425 workload, ring_id, head, tail, start, ctl);
1427 ret = prepare_mm(workload);
1429 kmem_cache_free(s->workloads, workload);
1430 return ERR_PTR(ret);
1433 /* Only scan and shadow the first workload in the queue
1434 * as there is only one pre-allocated buf-obj for shadow.
1436 if (list_empty(workload_q_head(vgpu, ring_id))) {
1437 intel_runtime_pm_get(dev_priv);
1438 mutex_lock(&dev_priv->drm.struct_mutex);
1439 ret = intel_gvt_scan_and_shadow_workload(workload);
1440 mutex_unlock(&dev_priv->drm.struct_mutex);
1441 intel_runtime_pm_put(dev_priv);
1444 if (ret && (vgpu_is_vm_unhealthy(ret))) {
1445 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1446 intel_vgpu_destroy_workload(workload);
1447 return ERR_PTR(ret);
1454 * intel_vgpu_queue_workload - Queue a vGPU workload
1455 * @workload: the workload to queue
1457 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1459 list_add_tail(&workload->list,
1460 workload_q_head(workload->vgpu, workload->ring_id));
1461 intel_gvt_kick_schedule(workload->vgpu->gvt);
1462 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);