drm/i915: Drop struct_mutex from around i915_retire_requests()
author: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 4 Oct 2019 13:40:02 +0000 (14:40 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 4 Oct 2019 14:39:17 +0000 (15:39 +0100)
We don't need to hold struct_mutex now for retiring requests, so drop it
from i915_retire_requests() and i915_gem_wait_for_idle(), finally
removing I915_WAIT_LOCKED for good.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-8-chris@chris-wilson.co.uk
26 files changed:
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_pm.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/selftest_context.c
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/selftest_timeline.c
drivers/gpu/drm/i915/gt/selftest_workarounds.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/selftests/i915_active.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/i915/selftests/i915_selftest.c
drivers/gpu/drm/i915/selftests/i915_vma.c
drivers/gpu/drm/i915/selftests/igt_flush_test.c
drivers/gpu/drm/i915/selftests/igt_flush_test.h
drivers/gpu/drm/i915/selftests/igt_live_test.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c

index c1fca5728e6ecb9b6474be3e46beb79e17732f0e..81366aa4812b84e6c5dfff760a0e89bd346b2ddf 100644 (file)
@@ -155,7 +155,6 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
 static void clear_pages_worker(struct work_struct *work)
 {
        struct clear_pages_work *w = container_of(work, typeof(*w), work);
-       struct drm_i915_private *i915 = w->ce->engine->i915;
        struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
        struct i915_vma *vma = w->sleeve->vma;
        struct i915_request *rq;
@@ -173,11 +172,9 @@ static void clear_pages_worker(struct work_struct *work)
        obj->read_domains = I915_GEM_GPU_DOMAINS;
        obj->write_domain = 0;
 
-       /* XXX: we need to kill this */
-       mutex_lock(&i915->drm.struct_mutex);
        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (unlikely(err))
-               goto out_unlock;
+               goto out_signal;
 
        batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
        if (IS_ERR(batch)) {
@@ -229,8 +226,6 @@ out_batch:
        intel_emit_vma_release(w->ce, batch);
 out_unpin:
        i915_vma_unpin(vma);
-out_unlock:
-       mutex_unlock(&i915->drm.struct_mutex);
 out_signal:
        if (unlikely(err)) {
                dma_fence_set_error(&w->dma, err);
index 9d85aab68d34e6dfc550780fb6a19bd68bd830f5..0ab416887fc2afa418d1b854091838862a6e1e79 100644 (file)
@@ -1159,8 +1159,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 }
 
 static int
-__intel_context_reconfigure_sseu(struct intel_context *ce,
-                                struct intel_sseu sseu)
+intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
 {
        int ret;
 
@@ -1183,23 +1182,6 @@ unlock:
        return ret;
 }
 
-static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
-{
-       struct drm_i915_private *i915 = ce->engine->i915;
-       int ret;
-
-       ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-       if (ret)
-               return ret;
-
-       ret = __intel_context_reconfigure_sseu(ce, sseu);
-
-       mutex_unlock(&i915->drm.struct_mutex);
-
-       return ret;
-}
-
 static int
 user_to_context_sseu(struct drm_i915_private *i915,
                     const struct drm_i915_gem_context_param_sseu *user,
index 5180b2ee1cb725838f00db329458deb4b804fbfe..2ddc3aeaac9d42a72ea352c921bd31551bd95453 100644 (file)
@@ -48,11 +48,7 @@ static void retire_work_handler(struct work_struct *work)
        struct drm_i915_private *i915 =
                container_of(work, typeof(*i915), gem.retire_work.work);
 
-       /* Come back later if the device is busy... */
-       if (mutex_trylock(&i915->drm.struct_mutex)) {
-               i915_retire_requests(i915);
-               mutex_unlock(&i915->drm.struct_mutex);
-       }
+       i915_retire_requests(i915);
 
        queue_delayed_work(i915->wq,
                           &i915->gem.retire_work,
@@ -86,26 +82,23 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
 {
        bool result = !intel_gt_is_wedged(gt);
 
-       do {
-               if (i915_gem_wait_for_idle(gt->i915,
-                                          I915_WAIT_LOCKED |
-                                          I915_WAIT_FOR_IDLE_BOOST,
-                                          I915_GEM_IDLE_TIMEOUT) == -ETIME) {
-                       /* XXX hide warning from gem_eio */
-                       if (i915_modparams.reset) {
-                               dev_err(gt->i915->drm.dev,
-                                       "Failed to idle engines, declaring wedged!\n");
-                               GEM_TRACE_DUMP();
-                       }
-
-                       /*
-                        * Forcibly cancel outstanding work and leave
-                        * the gpu quiet.
-                        */
-                       intel_gt_set_wedged(gt);
-                       result = false;
+       if (i915_gem_wait_for_idle(gt->i915,
+                                  I915_WAIT_FOR_IDLE_BOOST,
+                                  I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+               /* XXX hide warning from gem_eio */
+               if (i915_modparams.reset) {
+                       dev_err(gt->i915->drm.dev,
+                               "Failed to idle engines, declaring wedged!\n");
+                       GEM_TRACE_DUMP();
                }
-       } while (i915_retire_requests(gt->i915) && result);
+
+               /*
+                * Forcibly cancel outstanding work and leave
+                * the gpu quiet.
+                */
+               intel_gt_set_wedged(gt);
+               result = false;
+       }
 
        if (intel_gt_pm_wait_for_idle(gt))
                result = false;
@@ -145,8 +138,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 
        user_forcewake(&i915->gt, true);
 
-       mutex_lock(&i915->drm.struct_mutex);
-
        /*
         * We have to flush all the executing contexts to main memory so
         * that they can saved in the hibernation image. To ensure the last
@@ -158,8 +149,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
         */
        switch_to_kernel_context_sync(&i915->gt);
 
-       mutex_unlock(&i915->drm.struct_mutex);
-
        cancel_delayed_work_sync(&i915->gt.hangcheck.work);
 
        i915_gem_drain_freed_objects(i915);
index 0ff7a89aadca1de7ab0287d298c5feeba5ac325b..549810f70aeb6f4b1ffd95435b168b940077481a 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/prime_numbers.h>
 
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
 
 #include "i915_selftest.h"
 #include "selftests/i915_random.h"
@@ -78,7 +79,7 @@ static int gtt_set(struct drm_i915_gem_object *obj,
 {
        struct i915_vma *vma;
        u32 __iomem *map;
-       int err;
+       int err = 0;
 
        i915_gem_object_lock(obj);
        err = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -90,15 +91,21 @@ static int gtt_set(struct drm_i915_gem_object *obj,
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
+       intel_gt_pm_get(vma->vm->gt);
+
        map = i915_vma_pin_iomap(vma);
        i915_vma_unpin(vma);
-       if (IS_ERR(map))
-               return PTR_ERR(map);
+       if (IS_ERR(map)) {
+               err = PTR_ERR(map);
+               goto out_rpm;
+       }
 
        iowrite32(v, &map[offset / sizeof(*map)]);
        i915_vma_unpin_iomap(vma);
 
-       return 0;
+out_rpm:
+       intel_gt_pm_put(vma->vm->gt);
+       return err;
 }
 
 static int gtt_get(struct drm_i915_gem_object *obj,
@@ -107,7 +114,7 @@ static int gtt_get(struct drm_i915_gem_object *obj,
 {
        struct i915_vma *vma;
        u32 __iomem *map;
-       int err;
+       int err = 0;
 
        i915_gem_object_lock(obj);
        err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -119,15 +126,21 @@ static int gtt_get(struct drm_i915_gem_object *obj,
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
+       intel_gt_pm_get(vma->vm->gt);
+
        map = i915_vma_pin_iomap(vma);
        i915_vma_unpin(vma);
-       if (IS_ERR(map))
-               return PTR_ERR(map);
+       if (IS_ERR(map)) {
+               err = PTR_ERR(map);
+               goto out_rpm;
+       }
 
        *v = ioread32(&map[offset / sizeof(*map)]);
        i915_vma_unpin_iomap(vma);
 
-       return 0;
+out_rpm:
+       intel_gt_pm_put(vma->vm->gt);
+       return err;
 }
 
 static int wc_set(struct drm_i915_gem_object *obj,
@@ -280,7 +293,6 @@ static int igt_gem_coherency(void *arg)
        struct drm_i915_private *i915 = arg;
        const struct igt_coherency_mode *read, *write, *over;
        struct drm_i915_gem_object *obj;
-       intel_wakeref_t wakeref;
        unsigned long count, n;
        u32 *offsets, *values;
        int err = 0;
@@ -299,8 +311,6 @@ static int igt_gem_coherency(void *arg)
 
        values = offsets + ncachelines;
 
-       mutex_lock(&i915->drm.struct_mutex);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        for (over = igt_coherency_mode; over->name; over++) {
                if (!over->set)
                        continue;
@@ -326,7 +336,7 @@ static int igt_gem_coherency(void *arg)
                                        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
                                        if (IS_ERR(obj)) {
                                                err = PTR_ERR(obj);
-                                               goto unlock;
+                                               goto free;
                                        }
 
                                        i915_random_reorder(offsets, ncachelines, &prng);
@@ -377,15 +387,13 @@ static int igt_gem_coherency(void *arg)
                        }
                }
        }
-unlock:
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       mutex_unlock(&i915->drm.struct_mutex);
+free:
        kfree(offsets);
        return err;
 
 put_object:
        i915_gem_object_put(obj);
-       goto unlock;
+       goto free;
 }
 
 int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
index 8eba0d3a31def8fc48078b3719d330dc29c272b3..f5402aad9b5a08e9d8d7a78d1c0818d6f3238758 100644 (file)
@@ -164,7 +164,6 @@ struct parallel_switch {
 static int __live_parallel_switch1(void *data)
 {
        struct parallel_switch *arg = data;
-       struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
        IGT_TIMEOUT(end_time);
        unsigned long count;
 
@@ -176,16 +175,12 @@ static int __live_parallel_switch1(void *data)
                for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
                        i915_request_put(rq);
 
-                       mutex_lock(&i915->drm.struct_mutex);
                        rq = i915_request_create(arg->ce[n]);
-                       if (IS_ERR(rq)) {
-                               mutex_unlock(&i915->drm.struct_mutex);
+                       if (IS_ERR(rq))
                                return PTR_ERR(rq);
-                       }
 
                        i915_request_get(rq);
                        i915_request_add(rq);
-                       mutex_unlock(&i915->drm.struct_mutex);
                }
 
                err = 0;
@@ -205,7 +200,6 @@ static int __live_parallel_switch1(void *data)
 static int __live_parallel_switchN(void *data)
 {
        struct parallel_switch *arg = data;
-       struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
        IGT_TIMEOUT(end_time);
        unsigned long count;
        int n;
@@ -215,15 +209,11 @@ static int __live_parallel_switchN(void *data)
                for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
                        struct i915_request *rq;
 
-                       mutex_lock(&i915->drm.struct_mutex);
                        rq = i915_request_create(arg->ce[n]);
-                       if (IS_ERR(rq)) {
-                               mutex_unlock(&i915->drm.struct_mutex);
+                       if (IS_ERR(rq))
                                return PTR_ERR(rq);
-                       }
 
                        i915_request_add(rq);
-                       mutex_unlock(&i915->drm.struct_mutex);
                }
 
                count++;
@@ -1173,7 +1163,7 @@ __sseu_test(const char *name,
        if (ret)
                return ret;
 
-       ret = __intel_context_reconfigure_sseu(ce, sseu);
+       ret = intel_context_reconfigure_sseu(ce, sseu);
        if (ret)
                goto out_spin;
 
@@ -1277,7 +1267,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
                goto out_fail;
 
 out_fail:
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                ret = -EIO;
 
        intel_context_unpin(ce);
index 36aca1c172e77c0bd1e0061e281bd9cee189093e..856b8e467ee819de200d3f83a54b6a34d1f3f047 100644 (file)
@@ -581,12 +581,8 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 
 static void restore_retire_worker(struct drm_i915_private *i915)
 {
+       igt_flush_test(i915);
        intel_gt_pm_put(&i915->gt);
-
-       mutex_lock(&i915->drm.struct_mutex);
-       igt_flush_test(i915, I915_WAIT_LOCKED);
-       mutex_unlock(&i915->drm.struct_mutex);
-
        i915_gem_driver_register__shrinker(i915);
 }
 
index c21d747e7d05bfa76c50599fdc7f9091f66f4266..9ec55b3a3815efb5d0ceadedda6966393a2b232e 100644 (file)
@@ -65,9 +65,7 @@ static int igt_fill_blt(void *arg)
                if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                        obj->cache_dirty = true;
 
-               mutex_lock(&i915->drm.struct_mutex);
                err = i915_gem_object_fill_blt(obj, ce, val);
-               mutex_unlock(&i915->drm.struct_mutex);
                if (err)
                        goto err_unpin;
 
@@ -166,9 +164,7 @@ static int igt_copy_blt(void *arg)
                if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                        dst->cache_dirty = true;
 
-               mutex_lock(&i915->drm.struct_mutex);
                err = i915_gem_object_copy_blt(src, dst, ce);
-               mutex_unlock(&i915->drm.struct_mutex);
                if (err)
                        goto err_unpin;
 
index 29fa1dabbc2e073d45635aae916685d08456deea..d4cefdd3843139b87f20ae1c258bb53a15c09fe1 100644 (file)
@@ -196,26 +196,14 @@ int intel_gt_resume(struct intel_gt *gt)
 
 static void wait_for_idle(struct intel_gt *gt)
 {
-       mutex_lock(&gt->i915->drm.struct_mutex); /* XXX */
-       do {
-               if (i915_gem_wait_for_idle(gt->i915,
-                                          I915_WAIT_LOCKED,
-                                          I915_GEM_IDLE_TIMEOUT) == -ETIME) {
-                       /* XXX hide warning from gem_eio */
-                       if (i915_modparams.reset) {
-                               dev_err(gt->i915->drm.dev,
-                                       "Failed to idle engines, declaring wedged!\n");
-                               GEM_TRACE_DUMP();
-                       }
-
-                       /*
-                        * Forcibly cancel outstanding work and leave
-                        * the gpu quiet.
-                        */
-                       intel_gt_set_wedged(gt);
-               }
-       } while (i915_retire_requests(gt->i915));
-       mutex_unlock(&gt->i915->drm.struct_mutex);
+       if (i915_gem_wait_for_idle(gt->i915, 0,
+                                  I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+               /*
+                * Forcibly cancel outstanding work and leave
+                * the gpu quiet.
+                */
+               intel_gt_set_wedged(gt);
+       }
 
        intel_gt_pm_wait_for_idle(gt);
 }
index e6bcbe7ab5e11da2fcf78925746d52ae48e9fbfe..86cffbb0a9cb6aa2433c777b64a58edb09cb3605 100644 (file)
@@ -318,7 +318,7 @@ static int live_active_context(void *arg)
                if (err)
                        break;
 
-               err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+               err = igt_flush_test(gt->i915);
                if (err)
                        break;
        }
@@ -431,7 +431,7 @@ static int live_remote_context(void *arg)
                if (err)
                        break;
 
-               err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+               err = igt_flush_test(gt->i915);
                if (err)
                        break;
        }
index d3bee9f880088e81060fe1bbbd3a83e4c713d7ee..ffbb3d23b88738c936255c5b12b6336686f8186c 100644 (file)
@@ -58,7 +58,9 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
        memset(h, 0, sizeof(*h));
        h->gt = gt;
 
+       mutex_lock(&gt->i915->drm.struct_mutex);
        h->ctx = kernel_context(gt->i915);
+       mutex_unlock(&gt->i915->drm.struct_mutex);
        if (IS_ERR(h->ctx))
                return PTR_ERR(h->ctx);
 
@@ -285,7 +287,7 @@ static void hang_fini(struct hang *h)
 
        kernel_context_close(h->ctx);
 
-       igt_flush_test(h->gt->i915, I915_WAIT_LOCKED);
+       igt_flush_test(h->gt->i915);
 }
 
 static bool wait_until_running(struct hang *h, struct i915_request *rq)
@@ -309,10 +311,9 @@ static int igt_hang_sanitycheck(void *arg)
 
        /* Basic check that we can execute our hanging batch */
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
        err = hang_init(&h, gt);
        if (err)
-               goto unlock;
+               return err;
 
        for_each_engine(engine, gt->i915, id) {
                struct intel_wedge_me w;
@@ -355,8 +356,6 @@ static int igt_hang_sanitycheck(void *arg)
 
 fini:
        hang_fini(&h);
-unlock:
-       mutex_unlock(&gt->i915->drm.struct_mutex);
        return err;
 }
 
@@ -395,8 +394,6 @@ static int igt_reset_nop(void *arg)
        reset_count = i915_reset_count(global);
        count = 0;
        do {
-               mutex_lock(&gt->i915->drm.struct_mutex);
-
                for_each_engine(engine, gt->i915, id) {
                        int i;
 
@@ -417,7 +414,6 @@ static int igt_reset_nop(void *arg)
                intel_gt_reset(gt, ALL_ENGINES, NULL);
                igt_global_reset_unlock(gt);
 
-               mutex_unlock(&gt->i915->drm.struct_mutex);
                if (intel_gt_is_wedged(gt)) {
                        err = -EIO;
                        break;
@@ -429,16 +425,13 @@ static int igt_reset_nop(void *arg)
                        break;
                }
 
-               err = igt_flush_test(gt->i915, 0);
+               err = igt_flush_test(gt->i915);
                if (err)
                        break;
        } while (time_before(jiffies, end_time));
        pr_info("%s: %d resets\n", __func__, count);
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
-       err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
-       mutex_unlock(&gt->i915->drm.struct_mutex);
-
+       err = igt_flush_test(gt->i915);
 out:
        mock_file_free(gt->i915, file);
        if (intel_gt_is_wedged(gt))
@@ -494,7 +487,6 @@ static int igt_reset_nop_engine(void *arg)
                                break;
                        }
 
-                       mutex_lock(&gt->i915->drm.struct_mutex);
                        for (i = 0; i < 16; i++) {
                                struct i915_request *rq;
 
@@ -507,7 +499,6 @@ static int igt_reset_nop_engine(void *arg)
                                i915_request_add(rq);
                        }
                        err = intel_engine_reset(engine, NULL);
-                       mutex_unlock(&gt->i915->drm.struct_mutex);
                        if (err) {
                                pr_err("i915_reset_engine failed\n");
                                break;
@@ -533,15 +524,12 @@ static int igt_reset_nop_engine(void *arg)
                if (err)
                        break;
 
-               err = igt_flush_test(gt->i915, 0);
+               err = igt_flush_test(gt->i915);
                if (err)
                        break;
        }
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
-       err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
-       mutex_unlock(&gt->i915->drm.struct_mutex);
-
+       err = igt_flush_test(gt->i915);
 out:
        mock_file_free(gt->i915, file);
        if (intel_gt_is_wedged(gt))
@@ -563,9 +551,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
                return 0;
 
        if (active) {
-               mutex_lock(&gt->i915->drm.struct_mutex);
                err = hang_init(&h, gt);
-               mutex_unlock(&gt->i915->drm.struct_mutex);
                if (err)
                        return err;
        }
@@ -593,17 +579,14 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
                        if (active) {
                                struct i915_request *rq;
 
-                               mutex_lock(&gt->i915->drm.struct_mutex);
                                rq = hang_create_request(&h, engine);
                                if (IS_ERR(rq)) {
                                        err = PTR_ERR(rq);
-                                       mutex_unlock(&gt->i915->drm.struct_mutex);
                                        break;
                                }
 
                                i915_request_get(rq);
                                i915_request_add(rq);
-                               mutex_unlock(&gt->i915->drm.struct_mutex);
 
                                if (!wait_until_running(&h, rq)) {
                                        struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -647,7 +630,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
                if (err)
                        break;
 
-               err = igt_flush_test(gt->i915, 0);
+               err = igt_flush_test(gt->i915);
                if (err)
                        break;
        }
@@ -655,11 +638,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
        if (intel_gt_is_wedged(gt))
                err = -EIO;
 
-       if (active) {
-               mutex_lock(&gt->i915->drm.struct_mutex);
+       if (active)
                hang_fini(&h);
-               mutex_unlock(&gt->i915->drm.struct_mutex);
-       }
 
        return err;
 }
@@ -741,10 +721,8 @@ static int active_engine(void *data)
                struct i915_request *old = rq[idx];
                struct i915_request *new;
 
-               mutex_lock(&engine->i915->drm.struct_mutex);
                new = igt_request_alloc(ctx[idx], engine);
                if (IS_ERR(new)) {
-                       mutex_unlock(&engine->i915->drm.struct_mutex);
                        err = PTR_ERR(new);
                        break;
                }
@@ -755,7 +733,6 @@ static int active_engine(void *data)
 
                rq[idx] = i915_request_get(new);
                i915_request_add(new);
-               mutex_unlock(&engine->i915->drm.struct_mutex);
 
                err = active_request_put(old);
                if (err)
@@ -795,9 +772,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
                return 0;
 
        if (flags & TEST_ACTIVE) {
-               mutex_lock(&gt->i915->drm.struct_mutex);
                err = hang_init(&h, gt);
-               mutex_unlock(&gt->i915->drm.struct_mutex);
                if (err)
                        return err;
 
@@ -855,17 +830,14 @@ static int __igt_reset_engines(struct intel_gt *gt,
                        struct i915_request *rq = NULL;
 
                        if (flags & TEST_ACTIVE) {
-                               mutex_lock(&gt->i915->drm.struct_mutex);
                                rq = hang_create_request(&h, engine);
                                if (IS_ERR(rq)) {
                                        err = PTR_ERR(rq);
-                                       mutex_unlock(&gt->i915->drm.struct_mutex);
                                        break;
                                }
 
                                i915_request_get(rq);
                                i915_request_add(rq);
-                               mutex_unlock(&gt->i915->drm.struct_mutex);
 
                                if (!wait_until_running(&h, rq)) {
                                        struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -977,9 +949,7 @@ unwind:
                if (err)
                        break;
 
-               mutex_lock(&gt->i915->drm.struct_mutex);
-               err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
-               mutex_unlock(&gt->i915->drm.struct_mutex);
+               err = igt_flush_test(gt->i915);
                if (err)
                        break;
        }
@@ -987,11 +957,8 @@ unwind:
        if (intel_gt_is_wedged(gt))
                err = -EIO;
 
-       if (flags & TEST_ACTIVE) {
-               mutex_lock(&gt->i915->drm.struct_mutex);
+       if (flags & TEST_ACTIVE)
                hang_fini(&h);
-               mutex_unlock(&gt->i915->drm.struct_mutex);
-       }
 
        return err;
 }
@@ -1061,7 +1028,6 @@ static int igt_reset_wait(void *arg)
 
        igt_global_reset_lock(gt);
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
        err = hang_init(&h, gt);
        if (err)
                goto unlock;
@@ -1109,7 +1075,6 @@ out_rq:
 fini:
        hang_fini(&h);
 unlock:
-       mutex_unlock(&gt->i915->drm.struct_mutex);
        igt_global_reset_unlock(gt);
 
        if (intel_gt_is_wedged(gt))
@@ -1189,10 +1154,9 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 
        /* Check that we can recover an unbind stuck on a hanging request */
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
        err = hang_init(&h, gt);
        if (err)
-               goto unlock;
+               return err;
 
        obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
        if (IS_ERR(obj)) {
@@ -1255,8 +1219,6 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
        if (err)
                goto out_rq;
 
-       mutex_unlock(&gt->i915->drm.struct_mutex);
-
        if (!wait_until_running(&h, rq)) {
                struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
 
@@ -1305,16 +1267,12 @@ out_reset:
                put_task_struct(tsk);
        }
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
 out_rq:
        i915_request_put(rq);
 out_obj:
        i915_gem_object_put(obj);
 fini:
        hang_fini(&h);
-unlock:
-       mutex_unlock(&gt->i915->drm.struct_mutex);
-
        if (intel_gt_is_wedged(gt))
                return -EIO;
 
@@ -1396,7 +1354,6 @@ static int igt_reset_queue(void *arg)
 
        igt_global_reset_lock(gt);
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
        err = hang_init(&h, gt);
        if (err)
                goto unlock;
@@ -1511,7 +1468,7 @@ static int igt_reset_queue(void *arg)
 
                i915_request_put(prev);
 
-               err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+               err = igt_flush_test(gt->i915);
                if (err)
                        break;
        }
@@ -1519,7 +1476,6 @@ static int igt_reset_queue(void *arg)
 fini:
        hang_fini(&h);
 unlock:
-       mutex_unlock(&gt->i915->drm.struct_mutex);
        igt_global_reset_unlock(gt);
 
        if (intel_gt_is_wedged(gt))
@@ -1546,11 +1502,9 @@ static int igt_handle_error(void *arg)
        if (!engine || !intel_engine_can_store_dword(engine))
                return 0;
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
-
        err = hang_init(&h, gt);
        if (err)
-               goto err_unlock;
+               return err;
 
        rq = hang_create_request(&h, engine);
        if (IS_ERR(rq)) {
@@ -1574,8 +1528,6 @@ static int igt_handle_error(void *arg)
                goto err_request;
        }
 
-       mutex_unlock(&gt->i915->drm.struct_mutex);
-
        /* Temporarily disable error capture */
        error = xchg(&global->first_error, (void *)-1);
 
@@ -1583,8 +1535,6 @@ static int igt_handle_error(void *arg)
 
        xchg(&global->first_error, error);
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
-
        if (rq->fence.error != -EIO) {
                pr_err("Guilty request not identified!\n");
                err = -EINVAL;
@@ -1595,8 +1545,6 @@ err_request:
        i915_request_put(rq);
 err_fini:
        hang_fini(&h);
-err_unlock:
-       mutex_unlock(&gt->i915->drm.struct_mutex);
        return err;
 }
 
@@ -1689,7 +1637,6 @@ static int igt_reset_engines_atomic(void *arg)
                return 0;
 
        igt_global_reset_lock(gt);
-       mutex_lock(&gt->i915->drm.struct_mutex);
 
        /* Flush any requests before we get started and check basics */
        if (!igt_force_reset(gt))
@@ -1709,9 +1656,7 @@ static int igt_reset_engines_atomic(void *arg)
 out:
        /* As we poke around the guts, do a full reset before continuing. */
        igt_force_reset(gt);
-
 unlock:
-       mutex_unlock(&gt->i915->drm.struct_mutex);
        igt_global_reset_unlock(gt);
 
        return err;
@@ -1751,10 +1696,6 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 
        err = intel_gt_live_subtests(tests, gt);
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
-       igt_flush_test(gt->i915, I915_WAIT_LOCKED);
-       mutex_unlock(&gt->i915->drm.struct_mutex);
-
        i915_modparams.enable_hangcheck = saved_hangcheck;
        intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
 
index dd25636abc5bc477fb4cd460ff912ad754aded60..04c1cf57364268f34462307da93464522054ce16 100644 (file)
@@ -61,7 +61,7 @@ static int live_sanitycheck(void *arg)
                }
 
                igt_spinner_end(&spin);
-               if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+               if (igt_flush_test(i915)) {
                        err = -EIO;
                        goto err_ctx;
                }
@@ -384,8 +384,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
        if (err)
                goto out;
 
-       if (i915_request_wait(head,
-                             I915_WAIT_LOCKED,
+       if (i915_request_wait(head, 0,
                              2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
                pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
                       count, n);
@@ -457,7 +456,7 @@ static int live_timeslice_preempt(void *arg)
                        if (err)
                                goto err_pin;
 
-                       if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+                       if (igt_flush_test(i915)) {
                                err = -EIO;
                                goto err_pin;
                        }
@@ -1010,7 +1009,7 @@ static int live_nopreempt(void *arg)
                        goto err_wedged;
                }
 
-               if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               if (igt_flush_test(i915))
                        goto err_wedged;
        }
 
@@ -1075,7 +1074,7 @@ static int live_suppress_self_preempt(void *arg)
                if (!intel_engine_has_preemption(engine))
                        continue;
 
-               if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               if (igt_flush_test(i915))
                        goto err_wedged;
 
                intel_engine_pm_get(engine);
@@ -1136,7 +1135,7 @@ static int live_suppress_self_preempt(void *arg)
                }
 
                intel_engine_pm_put(engine);
-               if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               if (igt_flush_test(i915))
                        goto err_wedged;
        }
 
@@ -1297,7 +1296,7 @@ static int live_suppress_wait_preempt(void *arg)
                        for (i = 0; i < ARRAY_SIZE(client); i++)
                                igt_spinner_end(&client[i].spin);
 
-                       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+                       if (igt_flush_test(i915))
                                goto err_wedged;
 
                        if (engine->execlists.preempt_hang.count) {
@@ -1576,7 +1575,7 @@ static int live_preempt_hang(void *arg)
 
                igt_spinner_end(&spin_hi);
                igt_spinner_end(&spin_lo);
-               if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+               if (igt_flush_test(i915)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
@@ -1973,7 +1972,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
                prime, div64_u64(ktime_to_ns(times[1]), prime));
 
 out:
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
 
        for (nc = 0; nc < nctx; nc++) {
@@ -2118,7 +2117,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
                goto out;
 
 out:
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
 
        for (n = 0; n < nsibling; n++)
@@ -2296,7 +2295,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
 out:
        for (n = 0; !IS_ERR(rq[n]); n++)
                i915_request_put(rq[n]);
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
 
        kernel_context_close(ctx);
index 3214814031650e5844e5991622e168776b9c9fa3..16abfabf08c7d8bd3b4a7de69f99b96f4e226525 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <linux/prime_numbers.h>
 
-#include "gem/i915_gem_pm.h"
+#include "intel_engine_pm.h"
 #include "intel_gt.h"
 
 #include "../selftests/i915_random.h"
@@ -136,7 +136,6 @@ static int mock_hwsp_freelist(void *arg)
                goto err_put;
        }
 
-       mutex_lock(&state.i915->drm.struct_mutex);
        for (p = phases; p->name; p++) {
                pr_debug("%s(%s)\n", __func__, p->name);
                for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
@@ -149,7 +148,6 @@ static int mock_hwsp_freelist(void *arg)
 out:
        for (na = 0; na < state.max; na++)
                __mock_hwsp_record(&state, na, NULL);
-       mutex_unlock(&state.i915->drm.struct_mutex);
        kfree(state.history);
 err_put:
        drm_dev_put(&state.i915->drm);
@@ -449,8 +447,6 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
        struct i915_request *rq;
        int err;
 
-       lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
-
        err = intel_timeline_pin(tl);
        if (err) {
                rq = ERR_PTR(err);
@@ -461,10 +457,14 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
        if (IS_ERR(rq))
                goto out_unpin;
 
+       i915_request_get(rq);
+
        err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
        i915_request_add(rq);
-       if (err)
+       if (err) {
+               i915_request_put(rq);
                rq = ERR_PTR(err);
+       }
 
 out_unpin:
        intel_timeline_unpin(tl);
@@ -500,7 +500,6 @@ static int live_hwsp_engine(void *arg)
        struct intel_timeline **timelines;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
-       intel_wakeref_t wakeref;
        unsigned long count, n;
        int err = 0;
 
@@ -515,14 +514,13 @@ static int live_hwsp_engine(void *arg)
        if (!timelines)
                return -ENOMEM;
 
-       mutex_lock(&i915->drm.struct_mutex);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
        count = 0;
        for_each_engine(engine, i915, id) {
                if (!intel_engine_can_store_dword(engine))
                        continue;
 
+               intel_engine_pm_get(engine);
+
                for (n = 0; n < NUM_TIMELINES; n++) {
                        struct intel_timeline *tl;
                        struct i915_request *rq;
@@ -530,22 +528,26 @@ static int live_hwsp_engine(void *arg)
                        tl = checked_intel_timeline_create(i915);
                        if (IS_ERR(tl)) {
                                err = PTR_ERR(tl);
-                               goto out;
+                               break;
                        }
 
                        rq = tl_write(tl, engine, count);
                        if (IS_ERR(rq)) {
                                intel_timeline_put(tl);
                                err = PTR_ERR(rq);
-                               goto out;
+                               break;
                        }
 
                        timelines[count++] = tl;
+                       i915_request_put(rq);
                }
+
+               intel_engine_pm_put(engine);
+               if (err)
+                       break;
        }
 
-out:
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
 
        for (n = 0; n < count; n++) {
@@ -559,11 +561,7 @@ out:
                intel_timeline_put(tl);
        }
 
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       mutex_unlock(&i915->drm.struct_mutex);
-
        kvfree(timelines);
-
        return err;
 #undef NUM_TIMELINES
 }
@@ -575,7 +573,6 @@ static int live_hwsp_alternate(void *arg)
        struct intel_timeline **timelines;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
-       intel_wakeref_t wakeref;
        unsigned long count, n;
        int err = 0;
 
@@ -591,9 +588,6 @@ static int live_hwsp_alternate(void *arg)
        if (!timelines)
                return -ENOMEM;
 
-       mutex_lock(&i915->drm.struct_mutex);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
        count = 0;
        for (n = 0; n < NUM_TIMELINES; n++) {
                for_each_engine(engine, i915, id) {
@@ -605,11 +599,14 @@ static int live_hwsp_alternate(void *arg)
 
                        tl = checked_intel_timeline_create(i915);
                        if (IS_ERR(tl)) {
+                               intel_engine_pm_put(engine);
                                err = PTR_ERR(tl);
                                goto out;
                        }
 
+                       intel_engine_pm_get(engine);
                        rq = tl_write(tl, engine, count);
+                       intel_engine_pm_put(engine);
                        if (IS_ERR(rq)) {
                                intel_timeline_put(tl);
                                err = PTR_ERR(rq);
@@ -617,11 +614,12 @@ static int live_hwsp_alternate(void *arg)
                        }
 
                        timelines[count++] = tl;
+                       i915_request_put(rq);
                }
        }
 
 out:
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
 
        for (n = 0; n < count; n++) {
@@ -635,11 +633,7 @@ out:
                intel_timeline_put(tl);
        }
 
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       mutex_unlock(&i915->drm.struct_mutex);
-
        kvfree(timelines);
-
        return err;
 #undef NUM_TIMELINES
 }
@@ -650,7 +644,6 @@ static int live_hwsp_wrap(void *arg)
        struct intel_engine_cs *engine;
        struct intel_timeline *tl;
        enum intel_engine_id id;
-       intel_wakeref_t wakeref;
        int err = 0;
 
        /*
@@ -658,14 +651,10 @@ static int live_hwsp_wrap(void *arg)
         * foreign GPU references.
         */
 
-       mutex_lock(&i915->drm.struct_mutex);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
        tl = intel_timeline_create(&i915->gt, NULL);
-       if (IS_ERR(tl)) {
-               err = PTR_ERR(tl);
-               goto out_rpm;
-       }
+       if (IS_ERR(tl))
+               return PTR_ERR(tl);
+
        if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
                goto out_free;
 
@@ -681,7 +670,9 @@ static int live_hwsp_wrap(void *arg)
                if (!intel_engine_can_store_dword(engine))
                        continue;
 
+               intel_engine_pm_get(engine);
                rq = i915_request_create(engine->kernel_context);
+               intel_engine_pm_put(engine);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out;
@@ -747,16 +738,12 @@ static int live_hwsp_wrap(void *arg)
        }
 
 out:
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
 
        intel_timeline_unpin(tl);
 out_free:
        intel_timeline_put(tl);
-out_rpm:
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       mutex_unlock(&i915->drm.struct_mutex);
-
        return err;
 }
 
@@ -765,7 +752,6 @@ static int live_hwsp_recycle(void *arg)
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
-       intel_wakeref_t wakeref;
        unsigned long count;
        int err = 0;
 
@@ -775,9 +761,6 @@ static int live_hwsp_recycle(void *arg)
         * want to confuse ourselves or the GPU.
         */
 
-       mutex_lock(&i915->drm.struct_mutex);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
        count = 0;
        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
@@ -785,6 +768,8 @@ static int live_hwsp_recycle(void *arg)
                if (!intel_engine_can_store_dword(engine))
                        continue;
 
+               intel_engine_pm_get(engine);
+
                do {
                        struct intel_timeline *tl;
                        struct i915_request *rq;
@@ -792,21 +777,22 @@ static int live_hwsp_recycle(void *arg)
                        tl = checked_intel_timeline_create(i915);
                        if (IS_ERR(tl)) {
                                err = PTR_ERR(tl);
-                               goto out;
+                               break;
                        }
 
                        rq = tl_write(tl, engine, count);
                        if (IS_ERR(rq)) {
                                intel_timeline_put(tl);
                                err = PTR_ERR(rq);
-                               goto out;
+                               break;
                        }
 
                        if (i915_request_wait(rq, 0, HZ / 5) < 0) {
                                pr_err("Wait for timeline writes timed out!\n");
+                               i915_request_put(rq);
                                intel_timeline_put(tl);
                                err = -EIO;
-                               goto out;
+                               break;
                        }
 
                        if (*tl->hwsp_seqno != count) {
@@ -815,17 +801,18 @@ static int live_hwsp_recycle(void *arg)
                                err = -EINVAL;
                        }
 
+                       i915_request_put(rq);
                        intel_timeline_put(tl);
                        count++;
 
                        if (err)
-                               goto out;
+                               break;
                } while (!__igt_timeout(end_time, NULL));
-       }
 
-out:
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       mutex_unlock(&i915->drm.struct_mutex);
+               intel_engine_pm_put(engine);
+               if (err)
+                       break;
+       }
 
        return err;
 }
index d40ce0709bff4dc89aeecafb4ecaff7f3cc1726a..4ee2e2babd0da79813c8cb72063e8f47a8d76366 100644 (file)
@@ -676,7 +676,7 @@ out_unpin:
                        break;
        }
 
-       if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(ctx->i915))
                err = -EIO;
 out_batch:
        i915_vma_unpin_and_release(&batch, 0);
@@ -1090,7 +1090,7 @@ err:
                kernel_context_close(client[i].ctx);
        }
 
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
 
        return err;
@@ -1248,7 +1248,7 @@ err:
        igt_global_reset_unlock(&i915->gt);
        kernel_context_close(ctx);
 
-       igt_flush_test(i915, I915_WAIT_LOCKED);
+       igt_flush_test(i915);
 
        return ret;
 }
index fec9fb7cc384b404617dd0b645e50b1dd300a5fa..385289895107b0a92b5d61533cb0ffb917bd6335 100644 (file)
@@ -3621,6 +3621,7 @@ static int
 i915_drop_caches_set(void *data, u64 val)
 {
        struct drm_i915_private *i915 = data;
+       int ret;
 
        DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
                  val, val & DROP_ALL);
@@ -3630,40 +3631,21 @@ i915_drop_caches_set(void *data, u64 val)
                     I915_IDLE_ENGINES_TIMEOUT))
                intel_gt_set_wedged(&i915->gt);
 
-       /* No need to check and wait for gpu resets, only libdrm auto-restarts
-        * on ioctls on -EAGAIN. */
-       if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
-               int ret;
+       if (val & DROP_RETIRE)
+               i915_retire_requests(i915);
 
-               ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+       if (val & (DROP_IDLE | DROP_ACTIVE)) {
+               ret = i915_gem_wait_for_idle(i915,
+                                            I915_WAIT_INTERRUPTIBLE,
+                                            MAX_SCHEDULE_TIMEOUT);
                if (ret)
                        return ret;
+       }
 
-               /*
-                * To finish the flush of the idle_worker, we must complete
-                * the switch-to-kernel-context, which requires a double
-                * pass through wait_for_idle: first queues the switch,
-                * second waits for the switch.
-                */
-               if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
-                       ret = i915_gem_wait_for_idle(i915,
-                                                    I915_WAIT_INTERRUPTIBLE |
-                                                    I915_WAIT_LOCKED,
-                                                    MAX_SCHEDULE_TIMEOUT);
-
-               if (ret == 0 && val & DROP_IDLE)
-                       ret = i915_gem_wait_for_idle(i915,
-                                                    I915_WAIT_INTERRUPTIBLE |
-                                                    I915_WAIT_LOCKED,
-                                                    MAX_SCHEDULE_TIMEOUT);
-
-               if (val & DROP_RETIRE)
-                       i915_retire_requests(i915);
-
-               mutex_unlock(&i915->drm.struct_mutex);
-
-               if (ret == 0 && val & DROP_IDLE)
-                       ret = intel_gt_pm_wait_for_idle(&i915->gt);
+       if (val & DROP_IDLE) {
+               ret = intel_gt_pm_wait_for_idle(&i915->gt);
+               if (ret)
+                       return ret;
        }
 
        if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
index b0aa0a7c680f8b0fe4b9224df2d645b35146f4b2..83c2c7bf1e3496a87d41c1e4ea5320d91cdc59fa 100644 (file)
@@ -945,19 +945,16 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
        if (!intel_gt_pm_is_awake(gt))
                return 0;
 
-       GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
-                 flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
-                 timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
-
-       timeout = wait_for_timelines(gt, flags, timeout);
-       if (timeout < 0)
-               return timeout;
+       do {
+               timeout = wait_for_timelines(gt, flags, timeout);
+               if (timeout < 0)
+                       return timeout;
 
-       if (flags & I915_WAIT_LOCKED) {
-               lockdep_assert_held(&i915->drm.struct_mutex);
+               cond_resched();
+               if (signal_pending(current))
+                       return -EINTR;
 
-               i915_retire_requests(i915);
-       }
+       } while (i915_retire_requests(i915));
 
        return 0;
 }
index 91a885c36c6b66e1f8b73d7ad0046d6e5e6e3ad9..621fb33cda30d0120453c60c21aec9bb129b40c6 100644 (file)
@@ -308,10 +308,9 @@ long i915_request_wait(struct i915_request *rq,
                       long timeout)
        __attribute__((nonnull(1)));
 #define I915_WAIT_INTERRUPTIBLE        BIT(0)
-#define I915_WAIT_LOCKED       BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_PRIORITY     BIT(2) /* small priority bump for the request */
-#define I915_WAIT_ALL          BIT(3) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
+#define I915_WAIT_PRIORITY     BIT(1) /* small priority bump for the request */
+#define I915_WAIT_ALL          BIT(2) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
 static inline bool i915_request_signaled(const struct i915_request *rq)
 {
index 2cc71bcf884fe9d38bac102ec18730492872dd5f..268192b5613bdc54bbd61757e6e7b478b9fcbe9b 100644 (file)
@@ -162,10 +162,8 @@ static int live_active_wait(void *arg)
 
        __live_put(active);
 
-       mutex_lock(&i915->drm.struct_mutex);
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
-       mutex_unlock(&i915->drm.struct_mutex);
 
        return err;
 }
@@ -183,10 +181,8 @@ static int live_active_retire(void *arg)
                return PTR_ERR(active);
 
        /* waits for & retires all requests */
-       mutex_lock(&i915->drm.struct_mutex);
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
-       mutex_unlock(&i915->drm.struct_mutex);
 
        if (!READ_ONCE(active->retired)) {
                pr_err("i915_active not retired after flushing!\n");
index 75a4695b82bb78a819c43cebe29c3e190035de7f..52d2df843148eb0dd2a1e9dc0ee8c1e95fa92a79 100644 (file)
@@ -523,7 +523,7 @@ static int igt_evict_contexts(void *arg)
 
        mutex_lock(&i915->ggtt.vm.mutex);
 out_locked:
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
        while (reserved) {
                struct reserved *next = reserved->next;
index 02749bbfd0cfc94343b080a960601a9bfd2c022d..e40e6cfa51f12dc70f65accdedbb7563c5d16be2 100644 (file)
@@ -1705,12 +1705,8 @@ int i915_gem_gtt_mock_selftests(void)
 
        err = i915_subtests(tests, ggtt);
 
-       mutex_lock(&i915->drm.struct_mutex);
        mock_device_flush(i915);
-       mutex_unlock(&i915->drm.struct_mutex);
-
        i915_gem_drain_freed_objects(i915);
-
        mock_fini_ggtt(ggtt);
        kfree(ggtt);
 out_put:
@@ -2006,7 +2002,7 @@ static int igt_cs_tlb(void *arg)
                }
        }
 end:
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
        i915_gem_context_unlock_engines(ctx);
        i915_gem_object_unpin_map(out);
index eb175da4854759ae08780147f12717c50df164ae..d7d68c6a6bd502c1182f959663d763fa39d5c419 100644 (file)
@@ -41,21 +41,16 @@ static int igt_add_request(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct i915_request *request;
-       int err = -ENOMEM;
 
        /* Basic preliminary test to create a request and let it loose! */
 
-       mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
        if (!request)
-               goto out_unlock;
+               return -ENOMEM;
 
        i915_request_add(request);
 
-       err = 0;
-out_unlock:
-       mutex_unlock(&i915->drm.struct_mutex);
-       return err;
+       return 0;
 }
 
 static int igt_wait_request(void *arg)
@@ -67,12 +62,10 @@ static int igt_wait_request(void *arg)
 
        /* Submit a request, then wait upon it */
 
-       mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS0]->kernel_context, T);
-       if (!request) {
-               err = -ENOMEM;
-               goto out_unlock;
-       }
+       if (!request)
+               return -ENOMEM;
+
        i915_request_get(request);
 
        if (i915_request_wait(request, 0, 0) != -ETIME) {
@@ -125,9 +118,7 @@ static int igt_wait_request(void *arg)
        err = 0;
 out_request:
        i915_request_put(request);
-out_unlock:
        mock_device_flush(i915);
-       mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 
@@ -140,52 +131,45 @@ static int igt_fence_wait(void *arg)
 
        /* Submit a request, treat it as a fence and wait upon it */
 
-       mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS0]->kernel_context, T);
-       if (!request) {
-               err = -ENOMEM;
-               goto out_locked;
-       }
+       if (!request)
+               return -ENOMEM;
 
        if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
                pr_err("fence wait success before submit (expected timeout)!\n");
-               goto out_locked;
+               goto out;
        }
 
        i915_request_add(request);
-       mutex_unlock(&i915->drm.struct_mutex);
 
        if (dma_fence_is_signaled(&request->fence)) {
                pr_err("fence signaled immediately!\n");
-               goto out_device;
+               goto out;
        }
 
        if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
                pr_err("fence wait success after submit (expected timeout)!\n");
-               goto out_device;
+               goto out;
        }
 
        if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
                pr_err("fence wait timed out (expected success)!\n");
-               goto out_device;
+               goto out;
        }
 
        if (!dma_fence_is_signaled(&request->fence)) {
                pr_err("fence unsignaled after waiting!\n");
-               goto out_device;
+               goto out;
        }
 
        if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
                pr_err("fence wait timed out when complete (expected success)!\n");
-               goto out_device;
+               goto out;
        }
 
        err = 0;
-out_device:
-       mutex_lock(&i915->drm.struct_mutex);
-out_locked:
+out:
        mock_device_flush(i915);
-       mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 
@@ -199,6 +183,8 @@ static int igt_request_rewind(void *arg)
 
        mutex_lock(&i915->drm.struct_mutex);
        ctx[0] = mock_context(i915, "A");
+       mutex_unlock(&i915->drm.struct_mutex);
+
        ce = i915_gem_context_get_engine(ctx[0], RCS0);
        GEM_BUG_ON(IS_ERR(ce));
        request = mock_request(ce, 2 * HZ);
@@ -211,7 +197,10 @@ static int igt_request_rewind(void *arg)
        i915_request_get(request);
        i915_request_add(request);
 
+       mutex_lock(&i915->drm.struct_mutex);
        ctx[1] = mock_context(i915, "B");
+       mutex_unlock(&i915->drm.struct_mutex);
+
        ce = i915_gem_context_get_engine(ctx[1], RCS0);
        GEM_BUG_ON(IS_ERR(ce));
        vip = mock_request(ce, 0);
@@ -233,7 +222,6 @@ static int igt_request_rewind(void *arg)
        request->engine->submit_request(request);
        rcu_read_unlock();
 
-       mutex_unlock(&i915->drm.struct_mutex);
 
        if (i915_request_wait(vip, 0, HZ) == -ETIME) {
                pr_err("timed out waiting for high priority request\n");
@@ -248,14 +236,12 @@ static int igt_request_rewind(void *arg)
        err = 0;
 err:
        i915_request_put(vip);
-       mutex_lock(&i915->drm.struct_mutex);
 err_context_1:
        mock_context_close(ctx[1]);
        i915_request_put(request);
 err_context_0:
        mock_context_close(ctx[0]);
        mock_device_flush(i915);
-       mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 
@@ -282,7 +268,6 @@ __live_request_alloc(struct intel_context *ce)
 static int __igt_breadcrumbs_smoketest(void *arg)
 {
        struct smoketest *t = arg;
-       struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
        const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
        const unsigned int total = 4 * t->ncontexts + 1;
        unsigned int num_waits = 0, num_fences = 0;
@@ -337,14 +322,11 @@ static int __igt_breadcrumbs_smoketest(void *arg)
                        struct i915_request *rq;
                        struct intel_context *ce;
 
-                       mutex_lock(BKL);
-
                        ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
                        GEM_BUG_ON(IS_ERR(ce));
                        rq = t->request_alloc(ce);
                        intel_context_put(ce);
                        if (IS_ERR(rq)) {
-                               mutex_unlock(BKL);
                                err = PTR_ERR(rq);
                                count = n;
                                break;
@@ -357,8 +339,6 @@ static int __igt_breadcrumbs_smoketest(void *arg)
                        requests[n] = i915_request_get(rq);
                        i915_request_add(rq);
 
-                       mutex_unlock(BKL);
-
                        if (err >= 0)
                                err = i915_sw_fence_await_dma_fence(wait,
                                                                    &rq->fence,
@@ -457,15 +437,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
                goto out_threads;
        }
 
-       mutex_lock(&t.engine->i915->drm.struct_mutex);
        for (n = 0; n < t.ncontexts; n++) {
+               mutex_lock(&t.engine->i915->drm.struct_mutex);
                t.contexts[n] = mock_context(t.engine->i915, "mock");
+               mutex_unlock(&t.engine->i915->drm.struct_mutex);
                if (!t.contexts[n]) {
                        ret = -ENOMEM;
                        goto out_contexts;
                }
        }
-       mutex_unlock(&t.engine->i915->drm.struct_mutex);
 
        for (n = 0; n < ncpus; n++) {
                threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
@@ -495,18 +475,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
                atomic_long_read(&t.num_fences),
                ncpus);
 
-       mutex_lock(&t.engine->i915->drm.struct_mutex);
 out_contexts:
        for (n = 0; n < t.ncontexts; n++) {
                if (!t.contexts[n])
                        break;
                mock_context_close(t.contexts[n]);
        }
-       mutex_unlock(&t.engine->i915->drm.struct_mutex);
        kfree(t.contexts);
 out_threads:
        kfree(threads);
-
        return ret;
 }
 
@@ -539,7 +516,6 @@ static int live_nop_request(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
-       intel_wakeref_t wakeref;
        struct igt_live_test t;
        unsigned int id;
        int err = -ENODEV;
@@ -549,28 +525,25 @@ static int live_nop_request(void *arg)
         * the overhead of submitting requests to the hardware.
         */
 
-       mutex_lock(&i915->drm.struct_mutex);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
        for_each_engine(engine, i915, id) {
-               struct i915_request *request = NULL;
                unsigned long n, prime;
                IGT_TIMEOUT(end_time);
                ktime_t times[2] = {};
 
                err = igt_live_test_begin(&t, i915, __func__, engine->name);
                if (err)
-                       goto out_unlock;
+                       return err;
 
                for_each_prime_number_from(prime, 1, 8192) {
+                       struct i915_request *request = NULL;
+
                        times[1] = ktime_get_raw();
 
                        for (n = 0; n < prime; n++) {
+                               i915_request_put(request);
                                request = i915_request_create(engine->kernel_context);
-                               if (IS_ERR(request)) {
-                                       err = PTR_ERR(request);
-                                       goto out_unlock;
-                               }
+                               if (IS_ERR(request))
+                                       return PTR_ERR(request);
 
                                /* This space is left intentionally blank.
                                 *
@@ -585,9 +558,11 @@ static int live_nop_request(void *arg)
                                 * for latency.
                                 */
 
+                               i915_request_get(request);
                                i915_request_add(request);
                        }
                        i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
+                       i915_request_put(request);
 
                        times[1] = ktime_sub(ktime_get_raw(), times[1]);
                        if (prime == 1)
@@ -599,7 +574,7 @@ static int live_nop_request(void *arg)
 
                err = igt_live_test_end(&t);
                if (err)
-                       goto out_unlock;
+                       return err;
 
                pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
                        engine->name,
@@ -607,9 +582,6 @@ static int live_nop_request(void *arg)
                        prime, div64_u64(ktime_to_ns(times[1]), prime));
        }
 
-out_unlock:
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 
@@ -679,6 +651,7 @@ empty_request(struct intel_engine_cs *engine,
        if (err)
                goto out_request;
 
+       i915_request_get(request);
 out_request:
        i915_request_add(request);
        return err ? ERR_PTR(err) : request;
@@ -688,7 +661,6 @@ static int live_empty_request(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
-       intel_wakeref_t wakeref;
        struct igt_live_test t;
        struct i915_vma *batch;
        unsigned int id;
@@ -699,14 +671,9 @@ static int live_empty_request(void *arg)
         * the overhead of submitting requests to the hardware.
         */
 
-       mutex_lock(&i915->drm.struct_mutex);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
        batch = empty_batch(i915);
-       if (IS_ERR(batch)) {
-               err = PTR_ERR(batch);
-               goto out_unlock;
-       }
+       if (IS_ERR(batch))
+               return PTR_ERR(batch);
 
        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
@@ -730,6 +697,7 @@ static int live_empty_request(void *arg)
                        times[1] = ktime_get_raw();
 
                        for (n = 0; n < prime; n++) {
+                               i915_request_put(request);
                                request = empty_request(engine, batch);
                                if (IS_ERR(request)) {
                                        err = PTR_ERR(request);
@@ -745,6 +713,7 @@ static int live_empty_request(void *arg)
                        if (__igt_timeout(end_time, NULL))
                                break;
                }
+               i915_request_put(request);
 
                err = igt_live_test_end(&t);
                if (err)
@@ -759,9 +728,6 @@ static int live_empty_request(void *arg)
 out_batch:
        i915_vma_unpin(batch);
        i915_vma_put(batch);
-out_unlock:
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 
@@ -841,7 +807,6 @@ static int live_all_engines(void *arg)
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_request *request[I915_NUM_ENGINES];
-       intel_wakeref_t wakeref;
        struct igt_live_test t;
        struct i915_vma *batch;
        unsigned int id;
@@ -852,18 +817,15 @@ static int live_all_engines(void *arg)
         * block doing so, and that they don't complete too soon.
         */
 
-       mutex_lock(&i915->drm.struct_mutex);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
        err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
-               goto out_unlock;
+               return err;
 
        batch = recursive_batch(i915);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
-               goto out_unlock;
+               return err;
        }
 
        for_each_engine(engine, i915, id) {
@@ -933,9 +895,6 @@ out_request:
                        i915_request_put(request[id]);
        i915_vma_unpin(batch);
        i915_vma_put(batch);
-out_unlock:
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 
@@ -945,7 +904,6 @@ static int live_sequential_engines(void *arg)
        struct i915_request *request[I915_NUM_ENGINES] = {};
        struct i915_request *prev = NULL;
        struct intel_engine_cs *engine;
-       intel_wakeref_t wakeref;
        struct igt_live_test t;
        unsigned int id;
        int err;
@@ -956,12 +914,9 @@ static int live_sequential_engines(void *arg)
         * they are running on independent engines.
         */
 
-       mutex_lock(&i915->drm.struct_mutex);
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
        err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
-               goto out_unlock;
+               return err;
 
        for_each_engine(engine, i915, id) {
                struct i915_vma *batch;
@@ -971,7 +926,7 @@ static int live_sequential_engines(void *arg)
                        err = PTR_ERR(batch);
                        pr_err("%s: Unable to create batch for %s, err=%d\n",
                               __func__, engine->name, err);
-                       goto out_unlock;
+                       return err;
                }
 
                request[id] = i915_request_create(engine->kernel_context);
@@ -1063,9 +1018,6 @@ out_request:
                i915_vma_put(request[id]->batch);
                i915_request_put(request[id]);
        }
-out_unlock:
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-       mutex_unlock(&i915->drm.struct_mutex);
        return err;
 }
 
@@ -1080,16 +1032,12 @@ static int __live_parallel_engine1(void *arg)
                struct i915_request *rq;
                int err;
 
-               mutex_lock(&engine->i915->drm.struct_mutex);
                rq = i915_request_create(engine->kernel_context);
-               if (IS_ERR(rq)) {
-                       mutex_unlock(&engine->i915->drm.struct_mutex);
+               if (IS_ERR(rq))
                        return PTR_ERR(rq);
-               }
 
                i915_request_get(rq);
                i915_request_add(rq);
-               mutex_unlock(&engine->i915->drm.struct_mutex);
 
                err = 0;
                if (i915_request_wait(rq, 0, HZ / 5) < 0)
@@ -1115,16 +1063,11 @@ static int __live_parallel_engineN(void *arg)
        do {
                struct i915_request *rq;
 
-               mutex_lock(&engine->i915->drm.struct_mutex);
                rq = i915_request_create(engine->kernel_context);
-               if (IS_ERR(rq)) {
-                       mutex_unlock(&engine->i915->drm.struct_mutex);
+               if (IS_ERR(rq))
                        return PTR_ERR(rq);
-               }
 
                i915_request_add(rq);
-               mutex_unlock(&engine->i915->drm.struct_mutex);
-
                count++;
        } while (!__igt_timeout(end_time, NULL));
 
@@ -1154,9 +1097,7 @@ static int live_parallel_engines(void *arg)
                struct task_struct *tsk[I915_NUM_ENGINES] = {};
                struct igt_live_test t;
 
-               mutex_lock(&i915->drm.struct_mutex);
                err = igt_live_test_begin(&t, i915, __func__, "");
-               mutex_unlock(&i915->drm.struct_mutex);
                if (err)
                        break;
 
@@ -1184,10 +1125,8 @@ static int live_parallel_engines(void *arg)
                        put_task_struct(tsk[id]);
                }
 
-               mutex_lock(&i915->drm.struct_mutex);
                if (igt_live_test_end(&t))
                        err = -EIO;
-               mutex_unlock(&i915->drm.struct_mutex);
        }
 
        return err;
@@ -1280,9 +1219,10 @@ static int live_breadcrumbs_smoketest(void *arg)
                goto out_threads;
        }
 
-       mutex_lock(&i915->drm.struct_mutex);
        for (n = 0; n < t[0].ncontexts; n++) {
+               mutex_lock(&i915->drm.struct_mutex);
                t[0].contexts[n] = live_context(i915, file);
+               mutex_unlock(&i915->drm.struct_mutex);
                if (!t[0].contexts[n]) {
                        ret = -ENOMEM;
                        goto out_contexts;
@@ -1299,7 +1239,6 @@ static int live_breadcrumbs_smoketest(void *arg)
                t[id].max_batch = max_batches(t[0].contexts[0], engine);
                if (t[id].max_batch < 0) {
                        ret = t[id].max_batch;
-                       mutex_unlock(&i915->drm.struct_mutex);
                        goto out_flush;
                }
                /* One ring interleaved between requests from all cpus */
@@ -1314,7 +1253,6 @@ static int live_breadcrumbs_smoketest(void *arg)
                                          &t[id], "igt/%d.%d", id, n);
                        if (IS_ERR(tsk)) {
                                ret = PTR_ERR(tsk);
-                               mutex_unlock(&i915->drm.struct_mutex);
                                goto out_flush;
                        }
 
@@ -1322,7 +1260,6 @@ static int live_breadcrumbs_smoketest(void *arg)
                        threads[id * ncpus + n] = tsk;
                }
        }
-       mutex_unlock(&i915->drm.struct_mutex);
 
        msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
 
@@ -1350,10 +1287,8 @@ out_flush:
        pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
                num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
 
-       mutex_lock(&i915->drm.struct_mutex);
        ret = igt_live_test_end(&live) ?: ret;
 out_contexts:
-       mutex_unlock(&i915->drm.struct_mutex);
        kfree(t[0].contexts);
 out_threads:
        kfree(threads);
index 438ea0eaa4160c545b0ce639021459f7233b2d32..825a8286cbe84f3e395fd29a15394d14b8e36900 100644 (file)
@@ -263,10 +263,8 @@ int __i915_live_teardown(int err, void *data)
 {
        struct drm_i915_private *i915 = data;
 
-       mutex_lock(&i915->drm.struct_mutex);
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                err = -EIO;
-       mutex_unlock(&i915->drm.struct_mutex);
 
        i915_gem_drain_freed_objects(i915);
 
@@ -284,10 +282,8 @@ int __intel_gt_live_teardown(int err, void *data)
 {
        struct intel_gt *gt = data;
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
-       if (igt_flush_test(gt->i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(gt->i915))
                err = -EIO;
-       mutex_unlock(&gt->i915->drm.struct_mutex);
 
        i915_gem_drain_freed_objects(gt->i915);
 
index 0e4f66312b3936e49f200fe5413e589e9bf008a1..1c9db08f7c283b3f391c58ad12feee71f391e94e 100644 (file)
@@ -833,12 +833,8 @@ int i915_vma_mock_selftests(void)
 
        err = i915_subtests(tests, ggtt);
 
-       mutex_lock(&i915->drm.struct_mutex);
        mock_device_flush(i915);
-       mutex_unlock(&i915->drm.struct_mutex);
-
        i915_gem_drain_freed_objects(i915);
-
        mock_fini_ggtt(ggtt);
        kfree(ggtt);
 out_put:
index d3b5eb402d33f0f35e134a4ee74f3de2bcc17292..2a5fbe46ea9f5f580048e5d8d36623e3e08b9c60 100644 (file)
 
 #include "igt_flush_test.h"
 
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
+int igt_flush_test(struct drm_i915_private *i915)
 {
        int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
-       int repeat = !!(flags & I915_WAIT_LOCKED);
 
        cond_resched();
 
-       do {
-               if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
-                       pr_err("%pS timed out, cancelling all further testing.\n",
-                              __builtin_return_address(0));
+       i915_retire_requests(i915);
+       if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
+               pr_err("%pS timed out, cancelling all further testing.\n",
+                      __builtin_return_address(0));
 
-                       GEM_TRACE("%pS timed out.\n",
-                                 __builtin_return_address(0));
-                       GEM_TRACE_DUMP();
+               GEM_TRACE("%pS timed out.\n",
+                         __builtin_return_address(0));
+               GEM_TRACE_DUMP();
 
-                       intel_gt_set_wedged(&i915->gt);
-                       repeat = 0;
-                       ret = -EIO;
-               }
-
-               /* Ensure we also flush after wedging. */
-               if (flags & I915_WAIT_LOCKED)
-                       i915_retire_requests(i915);
-       } while (repeat--);
+               intel_gt_set_wedged(&i915->gt);
+               ret = -EIO;
+       }
+       i915_retire_requests(i915);
 
        return ret;
 }
index 63e009927c4328975b7acfcab8697d8487879c2a..7541fa74e6418b27138e2df864abe5da546f0f78 100644 (file)
@@ -9,6 +9,6 @@
 
 struct drm_i915_private;
 
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags);
+int igt_flush_test(struct drm_i915_private *i915);
 
 #endif /* IGT_FLUSH_TEST_H */
index 3e902761cd1668cb5ab9bba47553e07ed67b81cb..04a6f88fdf64555de6ae7e3796f4c09e093f8757 100644 (file)
@@ -19,15 +19,12 @@ int igt_live_test_begin(struct igt_live_test *t,
        enum intel_engine_id id;
        int err;
 
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
        t->i915 = i915;
        t->func = func;
        t->name = name;
 
        err = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_INTERRUPTIBLE |
-                                    I915_WAIT_LOCKED,
+                                    I915_WAIT_INTERRUPTIBLE,
                                     MAX_SCHEDULE_TIMEOUT);
        if (err) {
                pr_err("%s(%s): failed to idle before, with err=%d!",
@@ -50,9 +47,7 @@ int igt_live_test_end(struct igt_live_test *t)
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
-       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+       if (igt_flush_test(i915))
                return -EIO;
 
        if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
index 2448067822aff686f06e55ea4db34673109409b9..622bb2127453b91d91f61a07886c936ca41f82fd 100644 (file)
@@ -41,8 +41,6 @@ void mock_device_flush(struct drm_i915_private *i915)
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
        do {
                for_each_engine(engine, i915, id)
                        mock_engine_flush(engine);
@@ -55,9 +53,7 @@ static void mock_device_release(struct drm_device *dev)
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
-       mutex_lock(&i915->drm.struct_mutex);
        mock_device_flush(i915);
-       mutex_unlock(&i915->drm.struct_mutex);
 
        flush_work(&i915->gem.idle_work);
        i915_gem_drain_workqueue(i915);