2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * Zhi Wang <zhi.a.wang@intel.com>
27 * Ping Gao <ping.a.gao@intel.com>
28 * Tina Zhang <tina.zhang@intel.com>
29 * Changbin Du <changbin.du@intel.com>
30 * Min He <min.he@intel.com>
31 * Bing Niu <bing.niu@intel.com>
32 * Zhenyu Wang <zhenyuw@linux.intel.com>
36 #include <linux/kthread.h>
38 #include "gem/i915_gem_pm.h"
39 #include "gt/intel_context.h"
40 #include "gt/intel_ring.h"
43 #include "i915_gem_gtt.h"
46 #define RING_CTX_OFF(x) \
47 offsetof(struct execlist_ring_context, x)
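/*
 * Write the PDP root pointers into an execlist ring context image. The
 * pdps[] slots in the ring context are filled in reverse order relative
 * to the pdp[] array passed in, hence pdp[7 - i].
 */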
49 static void set_context_pdp_root_pointer(
50 struct execlist_ring_context *ring_context,
55 for (i = 0; i < 8; i++)
56 ring_context->pdps[i].val = pdp[7 - i];
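/*
 * Copy the shadow PPGTT root pointers of the workload's shadow_mm into the
 * shadow ring context image, so the submitted context walks GVT's shadow
 * page tables rather than the guest's.
 */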
59 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
61 struct drm_i915_gem_object *ctx_obj =
62 workload->req->context->state->obj;
63 struct execlist_ring_context *shadow_ring_context;
66 if (WARN_ON(!workload->shadow_mm))
69 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
72 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
73 shadow_ring_context = kmap(page);
74 set_context_pdp_root_pointer(shadow_ring_context,
75 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
80 * When populating the shadow ctx from the guest, we should not override the OA
81 * related registers, so that they will not be overwritten by guest OA configs.
82 * This makes it possible to capture OA data from the host for both host and guests.
84 static void sr_oa_regs(struct intel_vgpu_workload *workload,
85 u32 *reg_state, bool save)
87 struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
88 u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
89 u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
92 i915_mmio_reg_offset(EU_PERF_CNTL0),
93 i915_mmio_reg_offset(EU_PERF_CNTL1),
94 i915_mmio_reg_offset(EU_PERF_CNTL2),
95 i915_mmio_reg_offset(EU_PERF_CNTL3),
96 i915_mmio_reg_offset(EU_PERF_CNTL4),
97 i915_mmio_reg_offset(EU_PERF_CNTL5),
98 i915_mmio_reg_offset(EU_PERF_CNTL6),
101 if (workload->engine->id != RCS0)
105 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
107 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
108 u32 state_offset = ctx_flexeu0 + i * 2;
110 workload->flex_mmio[i] = reg_state[state_offset + 1];
113 reg_state[ctx_oactxctrl] =
114 i915_mmio_reg_offset(GEN8_OACTXCONTROL);
115 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
117 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
118 u32 state_offset = ctx_flexeu0 + i * 2;
119 u32 mmio = flex_mmio[i];
121 reg_state[state_offset] = mmio;
122 reg_state[state_offset + 1] = workload->flex_mmio[i];
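/*
 * Pull the guest's ring context into the shadow context image: copy the
 * individual ring registers via COPY_REG()/COPY_REG_MASKED(), then, unless
 * the context is restore-inhibited, copy the remaining engine context pages
 * (from page 2 of the guest LRC onwards) out of guest memory in batches of
 * consecutive GPAs.
 */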
127 static int populate_shadow_context(struct intel_vgpu_workload *workload)
129 struct intel_vgpu *vgpu = workload->vgpu;
130 struct intel_gvt *gvt = vgpu->gvt;
131 struct intel_context *ctx = workload->req->context;
132 struct execlist_ring_context *shadow_ring_context;
135 unsigned long context_gpa, context_page_num;
136 unsigned long gpa_base; /* first gpa of consecutive GPAs */
137 unsigned long gpa_size; /* size of consecutive GPAs */
140 GEM_BUG_ON(!intel_context_is_pinned(ctx));
142 context_base = (void *) ctx->lrc_reg_state -
143 (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
145 shadow_ring_context = (void *) ctx->lrc_reg_state;
147 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
148 #define COPY_REG(name) \
149 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
150 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
151 #define COPY_REG_MASKED(name) {\
152 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
153 + RING_CTX_OFF(name.val),\
154 &shadow_ring_context->name.val, 4);\
155 shadow_ring_context->name.val |= 0xffff << 16;\
158 COPY_REG_MASKED(ctx_ctrl);
159 COPY_REG(ctx_timestamp);
161 if (workload->engine->id == RCS0) {
162 COPY_REG(bb_per_ctx_ptr);
163 COPY_REG(rcs_indirect_ctx);
164 COPY_REG(rcs_indirect_ctx_offset);
167 #undef COPY_REG_MASKED
169 intel_gvt_hypervisor_read_gpa(vgpu,
170 workload->ring_context_gpa +
171 sizeof(*shadow_ring_context),
172 (void *)shadow_ring_context +
173 sizeof(*shadow_ring_context),
174 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
176 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
178 if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
181 gvt_dbg_sched("ring %s workload lrca %x",
182 workload->engine->name,
183 workload->ctx_desc.lrca);
185 context_page_num = workload->engine->context_size;
186 context_page_num = context_page_num >> PAGE_SHIFT;
188 if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
189 context_page_num = 19;
191 /* Find consecutive GPAs from gma until the first non-consecutive GPA.
192 * Read from the consecutive GPAs into the dst virtual address.
195 for (i = 2; i < context_page_num; i++) {
196 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
197 (u32)((workload->ctx_desc.lrca + i) <<
198 I915_GTT_PAGE_SHIFT));
199 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
200 gvt_vgpu_err("Invalid guest context descriptor\n");
205 gpa_base = context_gpa;
206 dst = context_base + (i << I915_GTT_PAGE_SHIFT);
207 } else if (context_gpa != gpa_base + gpa_size)
210 gpa_size += I915_GTT_PAGE_SIZE;
212 if (i == context_page_num - 1)
218 intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
219 gpa_base = context_gpa;
220 gpa_size = I915_GTT_PAGE_SIZE;
221 dst = context_base + (i << I915_GTT_PAGE_SHIFT);
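/*
 * GVT marks its shadow contexts with force-single-submission, so this is how
 * requests owned by GVT are told apart from host i915 requests.
 */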
226 static inline bool is_gvt_request(struct i915_request *rq)
228 return intel_context_force_single_submission(rq->context);
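/* Snapshot INSTDONE and ACTHD from the engine into the vGPU's vregs. */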
231 static void save_ring_hw_state(struct intel_vgpu *vgpu,
232 const struct intel_engine_cs *engine)
234 struct intel_uncore *uncore = engine->uncore;
237 reg = RING_INSTDONE(engine->mmio_base);
238 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
239 intel_uncore_read(uncore, reg);
241 reg = RING_ACTHD(engine->mmio_base);
242 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
243 intel_uncore_read(uncore, reg);
245 reg = RING_ACTHD_UDW(engine->mmio_base);
246 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
247 intel_uncore_read(uncore, reg);
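/*
 * Context status notifier: on schedule-in, switch the engine's render MMIO
 * state to the owning vGPU (or back to the host for non-GVT requests); on
 * schedule-out, save the ring HW state into the vGPU's vregs and clear
 * shadow_ctx_active; on preemption, just save the ring HW state. Waiters on
 * shadow_ctx_status_wq are woken at the end.
 */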
250 static int shadow_context_status_change(struct notifier_block *nb,
251 unsigned long action, void *data)
253 struct i915_request *rq = data;
254 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
255 shadow_ctx_notifier_block[rq->engine->id]);
256 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
257 enum intel_engine_id ring_id = rq->engine->id;
258 struct intel_vgpu_workload *workload;
261 if (!is_gvt_request(rq)) {
262 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
263 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
264 scheduler->engine_owner[ring_id]) {
265 /* Switch ring from vGPU to host. */
266 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
268 scheduler->engine_owner[ring_id] = NULL;
270 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
275 workload = scheduler->current_workload[ring_id];
276 if (unlikely(!workload))
280 case INTEL_CONTEXT_SCHEDULE_IN:
281 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
282 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
283 /* Switch ring from host to vGPU or vGPU to vGPU. */
284 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
285 workload->vgpu, rq->engine);
286 scheduler->engine_owner[ring_id] = workload->vgpu;
288 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
289 ring_id, workload->vgpu->id);
290 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
291 atomic_set(&workload->shadow_ctx_active, 1);
293 case INTEL_CONTEXT_SCHEDULE_OUT:
294 save_ring_hw_state(workload->vgpu, rq->engine);
295 atomic_set(&workload->shadow_ctx_active, 0);
297 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
298 save_ring_hw_state(workload->vgpu, rq->engine);
304 wake_up(&workload->shadow_ctx_status_wq);
309 shadow_context_descriptor_update(struct intel_context *ce,
310 struct intel_vgpu_workload *workload)
312 u64 desc = ce->lrc_desc;
315 * Update bits 0-11 of the context descriptor, which include flags
316 * like GEN8_CTX_* cached in desc_template.
318 desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
319 desc |= workload->ctx_desc.addressing_mode <<
320 GEN8_CTX_ADDRESSING_MODE_SHIFT;
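/*
 * Emit the scanned guest ring buffer contents into the i915 request's ring:
 * reserve rb_len bytes with intel_ring_begin() and memcpy the shadow ring
 * buffer there, recording the new location in shadow_ring_buffer_va.
 */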
325 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
327 struct intel_vgpu *vgpu = workload->vgpu;
328 struct i915_request *req = workload->req;
329 void *shadow_ring_buffer_va;
333 if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
334 intel_vgpu_restore_inhibit_context(vgpu, req);
337 * To track whether a request has started on HW, we can emit a
338 * breadcrumb at the beginning of the request and check its
339 * timeline's HWSP to see if the breadcrumb has advanced past the
340 * start of this request. In fact, the request must carry the
341 * init breadcrumb if its timeline has has_init_breadcrumb set, or the
342 * scheduler might read a wrong state of it during reset. Since the
343 * requests from GVT always set the has_init_breadcrumb flag, we
344 * need to do the emit_init_breadcrumb for all such requests here.
346 if (req->engine->emit_init_breadcrumb) {
347 err = req->engine->emit_init_breadcrumb(req);
349 gvt_vgpu_err("fail to emit init breadcrumb\n");
354 /* allocate shadow ring buffer */
355 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
357 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
362 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
364 /* get shadow ring buffer va */
365 workload->shadow_ring_buffer_va = cs;
367 memcpy(cs, shadow_ring_buffer_va,
370 cs += workload->rb_len / sizeof(u32);
371 intel_ring_advance(workload->req, cs);
376 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
378 if (!wa_ctx->indirect_ctx.obj)
381 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
382 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
384 wa_ctx->indirect_ctx.obj = NULL;
385 wa_ctx->indirect_ctx.shadow_va = NULL;
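/*
 * Point the shadow context's PPGTT at GVT's shadow page tables: a single
 * PML4 root for legacy 64-bit (4-level) mode, or the four PDP entries for
 * legacy 32-bit (3-level) mode.
 */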
388 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
389 struct intel_context *ce)
391 struct intel_vgpu_mm *mm = workload->shadow_mm;
392 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
395 if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
396 px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
398 for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
399 struct i915_page_directory * const pd =
400 i915_pd_entry(ppgtt->pd, i);
402 px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
408 intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
410 struct intel_vgpu *vgpu = workload->vgpu;
411 struct intel_vgpu_submission *s = &vgpu->submission;
412 struct i915_request *rq;
417 rq = i915_request_create(s->shadow[workload->engine->id]);
419 gvt_vgpu_err("fail to allocate gem request\n");
423 workload->req = i915_request_get(rq);
428 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
429 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
430 * @workload: an abstract entity for each execlist submission.
432 * This function is called before the workload is submitted to i915, to make
433 * sure the content of the workload is valid.
435 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
437 struct intel_vgpu *vgpu = workload->vgpu;
438 struct intel_vgpu_submission *s = &vgpu->submission;
441 lockdep_assert_held(&vgpu->vgpu_lock);
443 if (workload->shadow)
446 if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
447 shadow_context_descriptor_update(s->shadow[workload->engine->id],
450 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
454 if (workload->engine->id == RCS0 &&
455 workload->wa_ctx.indirect_ctx.size) {
456 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
461 workload->shadow = true;
465 release_shadow_wa_ctx(&workload->wa_ctx);
469 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
471 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
473 struct intel_gvt *gvt = workload->vgpu->gvt;
474 const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
475 struct intel_vgpu_shadow_bb *bb;
478 list_for_each_entry(bb, &workload->shadow_bb, list) {
479 /* For a privileged batch buffer that is not the wa_ctx, bb_start_cmd_va
480 * only points into ring_scan_buffer, not the real ring address
481 * allocated later in copy_workload_to_ring_buffer. Please note that
482 * shadow_ring_buffer_va is made to point to the real ring buffer va
483 * in copy_workload_to_ring_buffer.
487 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
491 /* For a non-privileged bb, scan & shadow is only for
492 * debugging purposes, so the content of the shadow bb
493 * is the same as the original bb. Therefore,
494 * rather than switching to the shadow bb's gma
495 * address, we directly use the original batch buffer's
496 * gma address and send the original bb to hardware
499 if (bb->clflush & CLFLUSH_AFTER) {
500 drm_clflush_virt_range(bb->va,
502 bb->clflush &= ~CLFLUSH_AFTER;
504 i915_gem_object_finish_access(bb->obj);
505 bb->accessing = false;
508 bb->vma = i915_gem_object_ggtt_pin(bb->obj,
510 if (IS_ERR(bb->vma)) {
511 ret = PTR_ERR(bb->vma);
515 /* relocate shadow batch buffer */
516 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
517 if (gmadr_bytes == 8)
518 bb->bb_start_cmd_va[2] = 0;
520 /* No one is going to touch shadow bb from now on. */
521 if (bb->clflush & CLFLUSH_AFTER) {
522 drm_clflush_virt_range(bb->va,
524 bb->clflush &= ~CLFLUSH_AFTER;
527 ret = i915_gem_object_set_to_gtt_domain(bb->obj,
532 ret = i915_vma_move_to_active(bb->vma,
538 i915_gem_object_finish_access(bb->obj);
539 bb->accessing = false;
544 release_shadow_batch_buffer(workload);
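/*
 * Patch the bb_per_ctx and indirect-ctx pointers in the shadow ring context
 * so they reference the shadowed (GGTT-pinned) copies rather than the
 * guest's addresses.
 */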
548 static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
550 struct intel_vgpu_workload *workload =
551 container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
552 struct i915_request *rq = workload->req;
553 struct execlist_ring_context *shadow_ring_context =
554 (struct execlist_ring_context *)rq->context->lrc_reg_state;
556 shadow_ring_context->bb_per_ctx_ptr.val =
557 (shadow_ring_context->bb_per_ctx_ptr.val &
558 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
559 shadow_ring_context->rcs_indirect_ctx.val =
560 (shadow_ring_context->rcs_indirect_ctx.val &
561 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
564 static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
566 struct i915_vma *vma;
567 unsigned char *per_ctx_va =
568 (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
569 wa_ctx->indirect_ctx.size;
571 if (wa_ctx->indirect_ctx.size == 0)
574 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
575 0, CACHELINE_BYTES, 0);
579 /* FIXME: we are not tracking our pinned VMA, leaving it
580 * up to the core to fix up the stray pin_count upon
584 wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
586 wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
587 memset(per_ctx_va, 0, CACHELINE_BYTES);
589 update_wa_ctx_2_shadow_ctx(wa_ctx);
593 static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
595 vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
599 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
601 struct intel_vgpu_shadow_bb *bb, *pos;
603 if (list_empty(&workload->shadow_bb))
606 bb = list_first_entry(&workload->shadow_bb,
607 struct intel_vgpu_shadow_bb, list);
609 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
612 i915_gem_object_finish_access(bb->obj);
614 if (bb->va && !IS_ERR(bb->va))
615 i915_gem_object_unpin_map(bb->obj);
617 if (bb->vma && !IS_ERR(bb->vma)) {
618 i915_vma_unpin(bb->vma);
619 i915_vma_close(bb->vma);
621 i915_gem_object_put(bb->obj);
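/*
 * Final per-workload preparation before submission: pin the shadow PPGTT and
 * write its PDPs into the shadow context, sync out-of-sync guest pages, flush
 * pending post-shadow updates, copy the ring buffer into the request, and pin
 * the shadow batch buffers and wa_ctx.
 */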
628 static int prepare_workload(struct intel_vgpu_workload *workload)
630 struct intel_vgpu *vgpu = workload->vgpu;
631 struct intel_vgpu_submission *s = &vgpu->submission;
634 ret = intel_vgpu_pin_mm(workload->shadow_mm);
636 gvt_vgpu_err("fail to vgpu pin mm\n");
640 if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
641 !workload->shadow_mm->ppgtt_mm.shadowed) {
642 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
646 update_shadow_pdps(workload);
648 set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
650 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
652 gvt_vgpu_err("fail to vgpu sync oos pages\n");
656 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
658 gvt_vgpu_err("fail to flush post shadow\n");
662 ret = copy_workload_to_ring_buffer(workload);
664 gvt_vgpu_err("fail to generate request\n");
668 ret = prepare_shadow_batch_buffer(workload);
670 gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
674 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
676 gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
677 goto err_shadow_batch;
680 if (workload->prepare) {
681 ret = workload->prepare(workload);
683 goto err_shadow_wa_ctx;
688 release_shadow_wa_ctx(&workload->wa_ctx);
690 release_shadow_batch_buffer(workload);
692 intel_vgpu_unpin_mm(workload->shadow_mm);
696 static int dispatch_workload(struct intel_vgpu_workload *workload)
698 struct intel_vgpu *vgpu = workload->vgpu;
699 struct i915_request *rq;
702 gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
703 workload->engine->name, workload);
705 mutex_lock(&vgpu->vgpu_lock);
707 ret = intel_gvt_workload_req_alloc(workload);
711 ret = intel_gvt_scan_and_shadow_workload(workload);
715 ret = populate_shadow_context(workload);
717 release_shadow_wa_ctx(&workload->wa_ctx);
721 ret = prepare_workload(workload);
724 /* We might still need to add a request with a
725 * clean ctx to retire it properly.
727 rq = fetch_and_zero(&workload->req);
728 i915_request_put(rq);
731 if (!IS_ERR_OR_NULL(workload->req)) {
732 gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
733 workload->engine->name, workload->req);
734 i915_request_add(workload->req);
735 workload->dispatched = true;
739 workload->status = ret;
740 mutex_unlock(&vgpu->vgpu_lock);
744 static struct intel_vgpu_workload *
745 pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
747 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
748 struct intel_vgpu_workload *workload = NULL;
750 mutex_lock(&gvt->sched_lock);
753 * no current vgpu / will be scheduled out / no workload
756 if (!scheduler->current_vgpu) {
757 gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
761 if (scheduler->need_reschedule) {
762 gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
766 if (!scheduler->current_vgpu->active ||
767 list_empty(workload_q_head(scheduler->current_vgpu, engine)))
771 * still have a current workload; maybe the workload dispatcher
772 * failed to submit it for some reason, so resubmit it.
774 if (scheduler->current_workload[engine->id]) {
775 workload = scheduler->current_workload[engine->id];
776 gvt_dbg_sched("ring %s still have current workload %p\n",
777 engine->name, workload);
782 * pick a workload as the current workload
783 * once the current workload is set, the schedule policy routines
784 * will wait until the current workload is finished when trying to
785 * schedule out a vgpu.
787 scheduler->current_workload[engine->id] =
788 list_first_entry(workload_q_head(scheduler->current_vgpu,
790 struct intel_vgpu_workload, list);
792 workload = scheduler->current_workload[engine->id];
794 gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
796 atomic_inc(&workload->vgpu->submission.running_workload_num);
798 mutex_unlock(&gvt->sched_lock);
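/*
 * Write-back path after the request completes: copy the shadow context image
 * back into the guest's context pages and update the guest-visible ring
 * head/tail registers.
 */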
802 static void update_guest_context(struct intel_vgpu_workload *workload)
804 struct i915_request *rq = workload->req;
805 struct intel_vgpu *vgpu = workload->vgpu;
806 struct execlist_ring_context *shadow_ring_context;
807 struct intel_context *ctx = workload->req->context;
810 unsigned long context_gpa, context_page_num;
811 unsigned long gpa_base; /* first gpa of consecutive GPAs */
812 unsigned long gpa_size; /* size of consecutive GPAs */
818 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
819 workload->ctx_desc.lrca);
821 GEM_BUG_ON(!intel_context_is_pinned(ctx));
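/*
 * The guest's RB_HEAD keeps a wrap counter in its upper bits; rebuild the
 * head value reported back to the guest from that counter and the final
 * tail offset of this workload.
 */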
823 head = workload->rb_head;
824 tail = workload->rb_tail;
825 wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
828 if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
834 head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
836 ring_base = rq->engine->mmio_base;
837 vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
838 vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
840 context_page_num = rq->engine->context_size;
841 context_page_num = context_page_num >> PAGE_SHIFT;
843 if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
844 context_page_num = 19;
846 context_base = (void *) ctx->lrc_reg_state -
847 (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
849 /* Find consecutive GPAs from gma until the first non-consecutive GPA.
850 * Write to the consecutive GPAs from the src virtual address.
853 for (i = 2; i < context_page_num; i++) {
854 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
855 (u32)((workload->ctx_desc.lrca + i) <<
856 I915_GTT_PAGE_SHIFT));
857 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
858 gvt_vgpu_err("invalid guest context descriptor\n");
863 gpa_base = context_gpa;
864 src = context_base + (i << I915_GTT_PAGE_SHIFT);
865 } else if (context_gpa != gpa_base + gpa_size)
868 gpa_size += I915_GTT_PAGE_SIZE;
870 if (i == context_page_num - 1)
876 intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
877 gpa_base = context_gpa;
878 gpa_size = I915_GTT_PAGE_SIZE;
879 src = context_base + (i << I915_GTT_PAGE_SHIFT);
882 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
883 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
885 shadow_ring_context = (void *) ctx->lrc_reg_state;
887 #define COPY_REG(name) \
888 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
889 RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
892 COPY_REG(ctx_timestamp);
896 intel_gvt_hypervisor_write_gpa(vgpu,
897 workload->ring_context_gpa +
898 sizeof(*shadow_ring_context),
899 (void *)shadow_ring_context +
900 sizeof(*shadow_ring_context),
901 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
904 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
905 intel_engine_mask_t engine_mask)
907 struct intel_vgpu_submission *s = &vgpu->submission;
908 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
909 struct intel_engine_cs *engine;
910 struct intel_vgpu_workload *pos, *n;
911 intel_engine_mask_t tmp;
913 /* free the unsubmitted workloads in the queues. */
914 for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
915 list_for_each_entry_safe(pos, n,
916 &s->workload_q_head[engine->id], list) {
917 list_del_init(&pos->list);
918 intel_vgpu_destroy_workload(pos);
920 clear_bit(engine->id, s->shadow_ctx_desc_updated);
924 static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
926 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
927 struct intel_vgpu_workload *workload =
928 scheduler->current_workload[ring_id];
929 struct intel_vgpu *vgpu = workload->vgpu;
930 struct intel_vgpu_submission *s = &vgpu->submission;
931 struct i915_request *rq = workload->req;
934 mutex_lock(&vgpu->vgpu_lock);
935 mutex_lock(&gvt->sched_lock);
937 /* For a workload w/ a request, we need to wait for the context
938 * switch to make sure the request is completed.
939 * For a workload w/o a request, directly complete the workload.
942 wait_event(workload->shadow_ctx_status_wq,
943 !atomic_read(&workload->shadow_ctx_active));
945 /* If this request caused a GPU hang, req->fence.error will
946 * be set to -EIO. Use -EIO to set the workload status so
947 * that, when this request caused a GPU hang, we do not trigger
948 * a context switch interrupt to the guest.
950 if (likely(workload->status == -EINPROGRESS)) {
951 if (workload->req->fence.error == -EIO)
952 workload->status = -EIO;
954 workload->status = 0;
957 if (!workload->status &&
958 !(vgpu->resetting_eng & BIT(ring_id))) {
959 update_guest_context(workload);
961 for_each_set_bit(event, workload->pending_events,
963 intel_vgpu_trigger_virtual_event(vgpu, event);
966 i915_request_put(fetch_and_zero(&workload->req));
969 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
970 ring_id, workload, workload->status);
972 scheduler->current_workload[ring_id] = NULL;
974 list_del_init(&workload->list);
976 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
977 /* If workload->status is not successful, it means the HW GPU
978 * hit a GPU hang or something went wrong with i915/GVT,
979 * and GVT won't inject a context switch interrupt to the guest.
980 * To the guest this error is effectively a vGPU hang, so
981 * we should emulate a vGPU hang accordingly. If
982 * there are pending workloads which were already submitted
983 * from the guest, we should clean them up like the HW GPU does.
985 * If we are in the middle of an engine reset, the pending
986 * workloads won't be submitted to the HW GPU and will be
987 * cleaned up during the resetting process later, so doing
988 * the workload cleanup here doesn't have any impact.
990 intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
993 workload->complete(workload);
995 atomic_dec(&s->running_workload_num);
996 wake_up(&scheduler->workload_complete_wq);
998 if (gvt->scheduler.need_reschedule)
999 intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
1001 mutex_unlock(&gvt->sched_lock);
1002 mutex_unlock(&vgpu->vgpu_lock);
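/*
 * Per-engine scheduler thread: sleeps until a workload is picked for this
 * engine, then takes a runtime PM wakeref (plus forcewake on Gen9+),
 * dispatches the workload to i915, waits for its request to complete and
 * finally completes the workload.
 */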
1005 static int workload_thread(void *arg)
1007 struct intel_engine_cs *engine = arg;
1008 const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
1009 struct intel_gvt *gvt = engine->i915->gvt;
1010 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1011 struct intel_vgpu_workload *workload = NULL;
1012 struct intel_vgpu *vgpu = NULL;
1014 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1016 gvt_dbg_core("workload thread for ring %s started\n", engine->name);
1018 while (!kthread_should_stop()) {
1019 intel_wakeref_t wakeref;
1021 add_wait_queue(&scheduler->waitq[engine->id], &wait);
1023 workload = pick_next_workload(gvt, engine);
1026 wait_woken(&wait, TASK_INTERRUPTIBLE,
1027 MAX_SCHEDULE_TIMEOUT);
1028 } while (!kthread_should_stop());
1029 remove_wait_queue(&scheduler->waitq[engine->id], &wait);
1034 gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
1035 engine->name, workload,
1036 workload->vgpu->id);
1038 wakeref = intel_runtime_pm_get(engine->uncore->rpm);
1040 gvt_dbg_sched("ring %s will dispatch workload %p\n",
1041 engine->name, workload);
1043 if (need_force_wake)
1044 intel_uncore_forcewake_get(engine->uncore,
1047 * Update the vReg of the vGPU which submitted this
1048 * workload. The vGPU may use these registers for checking
1049 * the context state. The value comes from GPU commands
1052 update_vreg_in_ctx(workload);
1054 ret = dispatch_workload(workload);
1057 vgpu = workload->vgpu;
1058 gvt_vgpu_err("fail to dispatch workload, skip\n");
1062 gvt_dbg_sched("ring %s wait workload %p\n",
1063 engine->name, workload);
1064 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
1067 gvt_dbg_sched("will complete workload %p, status: %d\n",
1068 workload, workload->status);
1070 complete_current_workload(gvt, engine->id);
1072 if (need_force_wake)
1073 intel_uncore_forcewake_put(engine->uncore,
1076 intel_runtime_pm_put(engine->uncore->rpm, wakeref);
1077 if (ret && (vgpu_is_vm_unhealthy(ret)))
1078 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1083 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
1085 struct intel_vgpu_submission *s = &vgpu->submission;
1086 struct intel_gvt *gvt = vgpu->gvt;
1087 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1089 if (atomic_read(&s->running_workload_num)) {
1090 gvt_dbg_sched("wait vgpu idle\n");
1092 wait_event(scheduler->workload_complete_wq,
1093 !atomic_read(&s->running_workload_num));
1097 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
1099 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1100 struct intel_engine_cs *engine;
1101 enum intel_engine_id i;
1103 gvt_dbg_core("clean workload scheduler\n");
1105 for_each_engine(engine, gvt->gt, i) {
1106 atomic_notifier_chain_unregister(
1107 &engine->context_status_notifier,
1108 &gvt->shadow_ctx_notifier_block[i]);
1109 kthread_stop(scheduler->thread[i]);
1113 int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
1115 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1116 struct intel_engine_cs *engine;
1117 enum intel_engine_id i;
1120 gvt_dbg_core("init workload scheduler\n");
1122 init_waitqueue_head(&scheduler->workload_complete_wq);
1124 for_each_engine(engine, gvt->gt, i) {
1125 init_waitqueue_head(&scheduler->waitq[i]);
1127 scheduler->thread[i] = kthread_run(workload_thread, engine,
1128 "gvt:%s", engine->name);
1129 if (IS_ERR(scheduler->thread[i])) {
1130 gvt_err("fail to create workload thread\n");
1131 ret = PTR_ERR(scheduler->thread[i]);
1135 gvt->shadow_ctx_notifier_block[i].notifier_call =
1136 shadow_context_status_change;
1137 atomic_notifier_chain_register(&engine->context_status_notifier,
1138 &gvt->shadow_ctx_notifier_block[i]);
1144 intel_gvt_clean_workload_scheduler(gvt);
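/*
 * Restore the shadow context's PPGTT root pointers to the values saved by
 * i915_context_ppgtt_root_save(), undoing what set_context_ppgtt_from_shadow()
 * overwrote.
 */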
1149 i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
1150 struct i915_ppgtt *ppgtt)
1154 if (i915_vm_is_4lvl(&ppgtt->vm)) {
1155 px_dma(ppgtt->pd) = s->i915_context_pml4;
1157 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1158 struct i915_page_directory * const pd =
1159 i915_pd_entry(ppgtt->pd, i);
1161 px_dma(pd) = s->i915_context_pdps[i];
1167 * intel_vgpu_clean_submission - free submission-related resource for vGPU
1170 * This function is called when a vGPU is being destroyed.
1173 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
1175 struct intel_vgpu_submission *s = &vgpu->submission;
1176 struct intel_engine_cs *engine;
1177 enum intel_engine_id id;
1179 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
1181 i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
1182 for_each_engine(engine, vgpu->gvt->gt, id)
1183 intel_context_unpin(s->shadow[id]);
1185 kmem_cache_destroy(s->workloads);
1190 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
1192 * @engine_mask: engines expected to be reset
1194 * This function is called when a vGPU is being reset.
1197 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
1198 intel_engine_mask_t engine_mask)
1200 struct intel_vgpu_submission *s = &vgpu->submission;
1205 intel_vgpu_clean_workloads(vgpu, engine_mask);
1206 s->ops->reset(vgpu, engine_mask);
1210 i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
1211 struct i915_ppgtt *ppgtt)
1215 if (i915_vm_is_4lvl(&ppgtt->vm)) {
1216 s->i915_context_pml4 = px_dma(ppgtt->pd);
1218 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1219 struct i915_page_directory * const pd =
1220 i915_pd_entry(ppgtt->pd, i);
1222 s->i915_context_pdps[i] = px_dma(pd);
1228 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
1231 * This function is called when a vGPU is being created.
1234 * Zero on success, negative error code if failed.
1237 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
1239 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1240 struct intel_vgpu_submission *s = &vgpu->submission;
1241 struct intel_engine_cs *engine;
1242 struct i915_ppgtt *ppgtt;
1243 enum intel_engine_id i;
1246 ppgtt = i915_ppgtt_create(&i915->gt);
1248 return PTR_ERR(ppgtt);
1250 i915_context_ppgtt_root_save(s, ppgtt);
1252 for_each_engine(engine, vgpu->gvt->gt, i) {
1253 struct intel_context *ce;
1255 INIT_LIST_HEAD(&s->workload_q_head[i]);
1256 s->shadow[i] = ERR_PTR(-EINVAL);
1258 ce = intel_context_create(engine);
1261 goto out_shadow_ctx;
1264 i915_vm_put(ce->vm);
1265 ce->vm = i915_vm_get(&ppgtt->vm);
1266 intel_context_set_single_submission(ce);
1268 if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
1269 const unsigned int ring_size = 512 * SZ_4K;
1271 ce->ring = __intel_context_ring_size(ring_size);
1274 ret = intel_context_pin(ce);
1275 intel_context_put(ce);
1277 goto out_shadow_ctx;
1282 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
1284 s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
1285 sizeof(struct intel_vgpu_workload), 0,
1287 offsetof(struct intel_vgpu_workload, rb_tail),
1288 sizeof_field(struct intel_vgpu_workload, rb_tail),
1291 if (!s->workloads) {
1293 goto out_shadow_ctx;
1296 atomic_set(&s->running_workload_num, 0);
1297 bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
1299 i915_vm_put(&ppgtt->vm);
1303 i915_context_ppgtt_root_restore(s, ppgtt);
1304 for_each_engine(engine, vgpu->gvt->gt, i) {
1305 if (IS_ERR(s->shadow[i]))
1308 intel_context_unpin(s->shadow[i]);
1309 intel_context_put(s->shadow[i]);
1311 i915_vm_put(&ppgtt->vm);
1316 * intel_vgpu_select_submission_ops - select virtual submission interface
1318 * @engine_mask: either ALL_ENGINES or target engine mask
1319 * @interface: expected vGPU virtual submission interface
1321 * This function is called when guest configures submission interface.
1324 * Zero on success, negative error code if failed.
1327 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
1328 intel_engine_mask_t engine_mask,
1329 unsigned int interface)
1331 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1332 struct intel_vgpu_submission *s = &vgpu->submission;
1333 const struct intel_vgpu_submission_ops *ops[] = {
1334 [INTEL_VGPU_EXECLIST_SUBMISSION] =
1335 &intel_vgpu_execlist_submission_ops,
1339 if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
1342 if (drm_WARN_ON(&i915->drm,
1343 interface == 0 && engine_mask != ALL_ENGINES))
1347 s->ops->clean(vgpu, engine_mask);
1349 if (interface == 0) {
1351 s->virtual_submission_interface = 0;
1353 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
1357 ret = ops[interface]->init(vgpu, engine_mask);
1361 s->ops = ops[interface];
1362 s->virtual_submission_interface = interface;
1365 gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
1366 vgpu->id, s->ops->name);
1372 * intel_vgpu_destroy_workload - destroy a vGPU workload
1373 * @workload: workload to destroy
1375 * This function is called when destroying a vGPU workload.
1378 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1380 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1382 release_shadow_batch_buffer(workload);
1383 release_shadow_wa_ctx(&workload->wa_ctx);
1385 if (workload->shadow_mm)
1386 intel_vgpu_mm_put(workload->shadow_mm);
1388 kmem_cache_free(s->workloads, workload);
1391 static struct intel_vgpu_workload *
1392 alloc_workload(struct intel_vgpu *vgpu)
1394 struct intel_vgpu_submission *s = &vgpu->submission;
1395 struct intel_vgpu_workload *workload;
1397 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1399 return ERR_PTR(-ENOMEM);
1401 INIT_LIST_HEAD(&workload->list);
1402 INIT_LIST_HEAD(&workload->shadow_bb);
1404 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1405 atomic_set(&workload->shadow_ctx_active, 0);
1407 workload->status = -EINPROGRESS;
1408 workload->vgpu = vgpu;
1413 #define RING_CTX_OFF(x) \
1414 offsetof(struct execlist_ring_context, x)
1416 static void read_guest_pdps(struct intel_vgpu *vgpu,
1417 u64 ring_context_gpa, u32 pdp[8])
1422 gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
1424 for (i = 0; i < 8; i++)
1425 intel_gvt_hypervisor_read_gpa(vgpu,
1426 gpa + i * 8, &pdp[7 - i], 4);
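/*
 * Derive the PPGTT root entry type from the context descriptor's addressing
 * mode, read the guest PDPs from the ring context, and find or create the
 * matching shadow PPGTT mm for this workload.
 */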
1429 static int prepare_mm(struct intel_vgpu_workload *workload)
1431 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1432 struct intel_vgpu_mm *mm;
1433 struct intel_vgpu *vgpu = workload->vgpu;
1434 enum intel_gvt_gtt_type root_entry_type;
1435 u64 pdps[GVT_RING_CTX_NR_PDPS];
1437 switch (desc->addressing_mode) {
1438 case 1: /* legacy 32-bit */
1439 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1441 case 3: /* legacy 64-bit */
1442 root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1445 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
1449 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
1451 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1455 workload->shadow_mm = mm;
1459 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
1460 ((a)->lrca == (b)->lrca))
1463 * intel_vgpu_create_workload - create a vGPU workload
1465 * @engine: the engine
1466 * @desc: a guest context descriptor
1468 * This function is called when creating a vGPU workload.
1471 * struct intel_vgpu_workload * on success, or a negative error code
1472 * encoded in the pointer (ERR_PTR) on failure.
1475 struct intel_vgpu_workload *
1476 intel_vgpu_create_workload(struct intel_vgpu *vgpu,
1477 const struct intel_engine_cs *engine,
1478 struct execlist_ctx_descriptor_format *desc)
1480 struct intel_vgpu_submission *s = &vgpu->submission;
1481 struct list_head *q = workload_q_head(vgpu, engine);
1482 struct intel_vgpu_workload *last_workload = NULL;
1483 struct intel_vgpu_workload *workload = NULL;
1484 u64 ring_context_gpa;
1485 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
1489 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
1490 (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
1491 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
1492 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
1493 return ERR_PTR(-EINVAL);
1496 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1497 RING_CTX_OFF(ring_header.val), &head, 4);
1499 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1500 RING_CTX_OFF(ring_tail.val), &tail, 4);
1504 head &= RB_HEAD_OFF_MASK;
1505 tail &= RB_TAIL_OFF_MASK;
1507 list_for_each_entry_reverse(last_workload, q, list) {
1509 if (same_context(&last_workload->ctx_desc, desc)) {
1510 gvt_dbg_el("ring %s cur workload == last\n",
1512 gvt_dbg_el("ctx head %x real head %lx\n", head,
1513 last_workload->rb_tail);
1515 * cannot use guest context head pointer here,
1516 * as it might not be updated at this time
1518 head = last_workload->rb_tail;
1523 gvt_dbg_el("ring %s begin a new workload\n", engine->name);
1525 /* record some ring buffer register values for scan and shadow */
1526 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1527 RING_CTX_OFF(rb_start.val), &start, 4);
1528 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1529 RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
1530 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1531 RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
1533 if (!intel_gvt_ggtt_validate_range(vgpu, start,
1534 _RING_CTL_BUF_SIZE(ctl))) {
1535 gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
1536 return ERR_PTR(-EINVAL);
1539 workload = alloc_workload(vgpu);
1540 if (IS_ERR(workload))
1543 workload->engine = engine;
1544 workload->ctx_desc = *desc;
1545 workload->ring_context_gpa = ring_context_gpa;
1546 workload->rb_head = head;
1547 workload->guest_rb_head = guest_head;
1548 workload->rb_tail = tail;
1549 workload->rb_start = start;
1550 workload->rb_ctl = ctl;
1552 if (engine->id == RCS0) {
1553 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1554 RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
1555 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1556 RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
1558 workload->wa_ctx.indirect_ctx.guest_gma =
1559 indirect_ctx & INDIRECT_CTX_ADDR_MASK;
1560 workload->wa_ctx.indirect_ctx.size =
1561 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
1564 if (workload->wa_ctx.indirect_ctx.size != 0) {
1565 if (!intel_gvt_ggtt_validate_range(vgpu,
1566 workload->wa_ctx.indirect_ctx.guest_gma,
1567 workload->wa_ctx.indirect_ctx.size)) {
1568 gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
1569 workload->wa_ctx.indirect_ctx.guest_gma);
1570 kmem_cache_free(s->workloads, workload);
1571 return ERR_PTR(-EINVAL);
1575 workload->wa_ctx.per_ctx.guest_gma =
1576 per_ctx & PER_CTX_ADDR_MASK;
1577 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1578 if (workload->wa_ctx.per_ctx.valid) {
1579 if (!intel_gvt_ggtt_validate_range(vgpu,
1580 workload->wa_ctx.per_ctx.guest_gma,
1582 gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
1583 workload->wa_ctx.per_ctx.guest_gma);
1584 kmem_cache_free(s->workloads, workload);
1585 return ERR_PTR(-EINVAL);
1590 gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
1591 workload, engine->name, head, tail, start, ctl);
1593 ret = prepare_mm(workload);
1595 kmem_cache_free(s->workloads, workload);
1596 return ERR_PTR(ret);
1599 /* Only scan and shadow the first workload in the queue
1600 * as there is only one pre-allocated buf-obj for shadow.
1602 if (list_empty(q)) {
1603 intel_wakeref_t wakeref;
1605 with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
1606 ret = intel_gvt_scan_and_shadow_workload(workload);
1610 if (vgpu_is_vm_unhealthy(ret))
1611 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1612 intel_vgpu_destroy_workload(workload);
1613 return ERR_PTR(ret);
1620 * intel_vgpu_queue_workload - Queue a vGPU workload
1621 * @workload: the workload to queue
1623 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1625 list_add_tail(&workload->list,
1626 workload_q_head(workload->vgpu, workload->engine));
1627 intel_gvt_kick_schedule(workload->vgpu->gvt);
1628 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);