drm/i915: Only allocate preempt context when required
author    Chris Wilson <chris@chris-wilson.co.uk>
          Wed, 7 Feb 2018 21:05:44 +0000 (21:05 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Thu, 8 Feb 2018 07:30:16 +0000 (07:30 +0000)
If we remove some hardcoded assumptions about the preempt context having
a fixed id, reserved from use by normal user contexts, we need only
allocate the i915_gem_context when it is actually required. The
subsequent decisions about whether to use preemption then reduce to
checking that the preempt context is available.
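
The pattern throughout the driver then becomes roughly the following
condensed sketch (identifiers are those used in the diff below; this is
an illustration, not the literal patch):

	/* Allocation is only attempted when the platform can preempt at all: */
	if (needs_preempt_context(i915)) {	/* == HAS_LOGICAL_RING_PREEMPTION(i915) */
		ctx = i915_gem_context_create_kernel(i915, INT_MAX);
		if (!IS_ERR(ctx))
			i915->preempt_context = ctx;
		/* on failure we simply run without preemption */
	}

	/* Consumers then test availability, not the platform capability: */
	if (i915->preempt_context)
		i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;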

v2: Include an assert that we don't allocate the preempt context twice.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Arkadiusz Hiler <arkadiusz.hiler@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180207210544.26351-3-chris@chris-wilson.co.uk
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/selftests/mock_gem_device.c

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8337d15bb0e5dd278fcebc982115ce1849481532..dd9efb9d0e0b52d1e1946f0c5fe90e180ae9e88a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -449,12 +449,18 @@ destroy_kernel_context(struct i915_gem_context **ctxp)
        i915_gem_context_free(ctx);
 }
 
+static bool needs_preempt_context(struct drm_i915_private *i915)
+{
+       return HAS_LOGICAL_RING_PREEMPTION(i915);
+}
+
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
        struct i915_gem_context *ctx;
-       int err;
 
+       /* Reassure ourselves we are only called once */
        GEM_BUG_ON(dev_priv->kernel_context);
+       GEM_BUG_ON(dev_priv->preempt_context);
 
        INIT_LIST_HEAD(&dev_priv->contexts.list);
        INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
@@ -468,8 +474,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
        ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
        if (IS_ERR(ctx)) {
                DRM_ERROR("Failed to create default global context\n");
-               err = PTR_ERR(ctx);
-               goto err;
+               return PTR_ERR(ctx);
        }
        /*
         * For easy recognisablity, we want the kernel context to be 0 and then
@@ -479,23 +484,18 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
        dev_priv->kernel_context = ctx;
 
        /* highest priority; preempting task */
-       ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
-       if (IS_ERR(ctx)) {
-               DRM_ERROR("Failed to create default preempt context\n");
-               err = PTR_ERR(ctx);
-               goto err_kernel_context;
+       if (needs_preempt_context(dev_priv)) {
+               ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
+               if (!IS_ERR(ctx))
+                       dev_priv->preempt_context = ctx;
+               else
+                       DRM_ERROR("Failed to create preempt context; disabling preemption\n");
        }
-       dev_priv->preempt_context = ctx;
 
        DRM_DEBUG_DRIVER("%s context support initialized\n",
                         dev_priv->engine[RCS]->context_size ? "logical" :
                         "fake");
        return 0;
-
-err_kernel_context:
-       destroy_kernel_context(&dev_priv->kernel_context);
-err:
-       return err;
 }
 
 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
@@ -521,7 +521,8 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
        lockdep_assert_held(&i915->drm.struct_mutex);
 
-       destroy_kernel_context(&i915->preempt_context);
+       if (i915->preempt_context)
+               destroy_kernel_context(&i915->preempt_context);
        destroy_kernel_context(&i915->kernel_context);
 
        /* Must free all deferred contexts (via flush_workqueue) first */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 7eebfbb95e89974b450d633a4878c936a4b1c74a..bf634432c9c6c65defc945af264c8be99fda632b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -631,7 +631,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
         * Similarly the preempt context must always be available so that
         * we can interrupt the engine at any time.
         */
-       if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
+       if (engine->i915->preempt_context) {
                ring = engine->context_pin(engine,
                                           engine->i915->preempt_context);
                if (IS_ERR(ring)) {
@@ -656,7 +656,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 err_breadcrumbs:
        intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
-       if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+       if (engine->i915->preempt_context)
                engine->context_unpin(engine, engine->i915->preempt_context);
 err_unpin_kernel:
        engine->context_unpin(engine, engine->i915->kernel_context);
@@ -686,7 +686,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
        if (engine->default_state)
                i915_gem_object_put(engine->default_state);
 
-       if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+       if (engine->i915->preempt_context)
                engine->context_unpin(engine, engine->i915->preempt_context);
        engine->context_unpin(engine, engine->i915->kernel_context);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 4ea65df05e0212dd9bd0774d59b65c8e6cdaebdd..b43b58cc599b44f2158d1b67a6e1e1b7f0bd8cd7 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -688,7 +688,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
                goto unlock;
 
        if (port_isset(port)) {
-               if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
+               if (engine->i915->preempt_context) {
                        struct guc_preempt_work *preempt_work =
                                &engine->i915->guc.preempt_work[engine->id];
 
@@ -984,17 +984,19 @@ static int guc_clients_create(struct intel_guc *guc)
        }
        guc->execbuf_client = client;
 
-       client = guc_client_alloc(dev_priv,
-                                 INTEL_INFO(dev_priv)->ring_mask,
-                                 GUC_CLIENT_PRIORITY_KMD_HIGH,
-                                 dev_priv->preempt_context);
-       if (IS_ERR(client)) {
-               DRM_ERROR("Failed to create GuC client for preemption!\n");
-               guc_client_free(guc->execbuf_client);
-               guc->execbuf_client = NULL;
-               return PTR_ERR(client);
+       if (dev_priv->preempt_context) {
+               client = guc_client_alloc(dev_priv,
+                                         INTEL_INFO(dev_priv)->ring_mask,
+                                         GUC_CLIENT_PRIORITY_KMD_HIGH,
+                                         dev_priv->preempt_context);
+               if (IS_ERR(client)) {
+                       DRM_ERROR("Failed to create GuC client for preemption!\n");
+                       guc_client_free(guc->execbuf_client);
+                       guc->execbuf_client = NULL;
+                       return PTR_ERR(client);
+               }
+               guc->preempt_client = client;
        }
-       guc->preempt_client = client;
 
        return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 449fd1e95f1f3d2e4643810ffd9c473f34ebeb0f..c2c8380a0121c874bcbfcc62af72218498087ac8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
 #define WA_TAIL_DWORDS 2
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-#define PREEMPT_ID 0x1
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                                            struct intel_engine_cs *engine);
@@ -448,7 +447,8 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
                &engine->i915->preempt_context->engine[engine->id];
        unsigned int n;
 
-       GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
+       GEM_BUG_ON(engine->execlists.preempt_complete_status !=
+                  upper_32_bits(ce->lrc_desc));
        GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
 
        memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
@@ -528,7 +528,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
                        goto unlock;
 
-               if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
+               if (engine->i915->preempt_context &&
                    rb_entry(rb, struct i915_priolist, node)->priority >
                    max(last->priotree.priority, 0)) {
                        /*
@@ -844,7 +844,7 @@ static void execlists_submission_tasklet(unsigned long data)
                        GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
 
                        if (status & GEN8_CTX_STATUS_COMPLETE &&
-                           buf[2*head + 1] == PREEMPT_ID) {
+                           buf[2*head + 1] == execlists->preempt_complete_status) {
                                GEM_TRACE("%s preempt-idle\n", engine->name);
 
                                execlists_cancel_port_requests(execlists);
@@ -1967,7 +1967,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
        engine->i915->caps.scheduler =
                I915_SCHEDULER_CAP_ENABLED |
                I915_SCHEDULER_CAP_PRIORITY;
-       if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+       if (engine->i915->preempt_context)
                engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
 }
 
@@ -2045,6 +2045,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
        engine->execlists.elsp =
                engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
 
+       engine->execlists.preempt_complete_status = ~0u;
+       if (engine->i915->preempt_context)
+               engine->execlists.preempt_complete_status =
+                       upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
+
        return 0;
 
 error:
@@ -2307,7 +2312,7 @@ populate_lr_context(struct i915_gem_context *ctx,
        if (!engine->default_state)
                regs[CTX_CONTEXT_CONTROL + 1] |=
                        _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
-       if (ctx->hw_id == PREEMPT_ID)
+       if (ctx == ctx->i915->preempt_context)
                regs[CTX_CONTEXT_CONTROL + 1] |=
                        _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                           CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
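
A note on the intel_lrc.c changes above: instead of recognising the
preempt context in the CSB by a reserved hw_id (the old PREEMPT_ID), the
engine now caches the upper 32 bits of the preempt context's lrc
descriptor at init time and compares it with the context-ID dword the
hardware reports on completion. A minimal sketch of that comparison (the
helper name csb_reports_preempt_complete is hypothetical, not part of
the patch):

	static bool
	csb_reports_preempt_complete(const struct intel_engine_execlists *execlists,
				     u32 csb_context_id)
	{
		/* ~0u when no preempt context exists, so this can never match */
		return csb_context_id == execlists->preempt_complete_status;
	}
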
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a0e7a6c2a57cd8f0b66f27e7e5dcc874857fd649..8f1a4badf812d42851962b691f3bcec47e14459b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -279,6 +279,11 @@ struct intel_engine_execlists {
         * @csb_use_mmio: access csb through mmio, instead of hwsp
         */
        bool csb_use_mmio;
+
+       /**
+        * @preempt_complete_status: expected CSB upon completing preemption
+        */
+       u32 preempt_complete_status;
 };
 
 #define INTEL_ENGINE_CS_MAX_NAME 8
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 1bc61f3f76fce003364eb86625cff6704e9443a1..3175db70cc6e0ce14badf19ea481cf3032dd3057 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -243,16 +243,10 @@ struct drm_i915_private *mock_gem_device(void)
        if (!i915->kernel_context)
                goto err_engine;
 
-       i915->preempt_context = mock_context(i915, NULL);
-       if (!i915->preempt_context)
-               goto err_kernel_context;
-
        WARN_ON(i915_gemfs_init(i915));
 
        return i915;
 
-err_kernel_context:
-       i915_gem_context_put(i915->kernel_context);
 err_engine:
        for_each_engine(engine, i915, id)
                mock_engine_free(engine);