Merge airlied/drm-next into drm-intel-next-queued
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index ab9a500ba3e90a9dcccf4a86a8ccada432ac973b..0056638b0c16dc2090e826aaa5b9b30c5aed13bb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -154,9 +154,10 @@ static int shadow_context_status_change(struct notifier_block *nb,
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = req->engine->id;
        struct intel_vgpu_workload *workload;
+       unsigned long flags;
 
        if (!is_gvt_request(req)) {
-               spin_lock_bh(&scheduler->mmio_context_lock);
+               spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
@@ -164,7 +165,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
                                              NULL, ring_id);
                        scheduler->engine_owner[ring_id] = NULL;
                }
-               spin_unlock_bh(&scheduler->mmio_context_lock);
+               spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 
                return NOTIFY_OK;
        }
@@ -175,7 +176,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 
        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
-               spin_lock_bh(&scheduler->mmio_context_lock);
+               spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -184,7 +185,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
-               spin_unlock_bh(&scheduler->mmio_context_lock);
+               spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
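
For reference, a minimal sketch (not part of this patch; all names are hypothetical, not from scheduler.c) of the locking pattern the hunks above switch to. spin_lock_irqsave() saves the current interrupt state and disables local interrupts, so the critical section is valid in any context, including hard IRQ; spin_lock_bh() only disables softirqs and must not be used where the caller can already be in hard interrupt context.

	/*
	 * Illustration of the irqsave pattern; example_lock, example_owner
	 * and example_event are hypothetical names for this sketch only.
	 */
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static void *example_owner;

	/* May be called from process, softirq or hard IRQ context. */
	static void example_event(void *new_owner)
	{
		unsigned long flags;

		/*
		 * Save the interrupt state in 'flags' and disable local
		 * interrupts; restore the exact previous state on unlock.
		 */
		spin_lock_irqsave(&example_lock, flags);
		if (example_owner != new_owner)
			example_owner = new_owner;
		spin_unlock_irqrestore(&example_lock, flags);
	}

This mirrors the change above, where the same critical sections previously relied on spin_lock_bh()/spin_unlock_bh() and now take mmio_context_lock with the irqsave/irqrestore variants plus a local 'flags' variable.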