Merge tag 'drm-next-2022-10-05' of git://anongit.freedesktop.org/drm/drm
drivers/gpu/drm/i915/i915_gem.c
index b605d0ceaefad31b19b7dfbecc272b882652802f..2bdddb61ebd7ae6215eaa26706a2df3258d42171 100644 (file)
@@ -842,6 +842,10 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
                                 &to_gt(i915)->ggtt->userfault_list, userfault_link)
                __i915_gem_object_release_mmap_gtt(obj);
 
+       list_for_each_entry_safe(obj, on,
+                                &to_gt(i915)->lmem_userfault_list, userfault_link)
+               i915_gem_object_runtime_pm_release_mmap_offset(obj);
+
        /*
         * The fence will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
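
The added loop mirrors the existing GGTT userfault walk just above it: local-memory objects on to_gt(i915)->lmem_userfault_list get their mmap offsets revoked before runtime suspend. list_for_each_entry_safe() is used rather than list_for_each_entry(), presumably because the release helper can unlink the current object from the userfault list while the walk is in progress. A minimal user-space sketch of that "safe" iteration idea, with purely illustrative names and no kernel headers, is:

/* Toy model of list_for_each_entry_safe(): cache the next node before
 * running a callback that may unlink (and free) the current one.
 * Illustrative user-space C only, not i915 code.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

/* Unlink @n from the singly linked list at @head and free it,
 * the way a release callback might drop an object from a list.
 */
static void release(struct node **head, struct node *n)
{
	struct node **pp;

	for (pp = head; *pp; pp = &(*pp)->next) {
		if (*pp == n) {
			*pp = n->next;
			break;
		}
	}
	printf("released node %d\n", n->id);
	free(n);
}

int main(void)
{
	struct node *head = NULL, *cur, *next;

	for (int i = 2; i >= 0; i--) {
		cur = malloc(sizeof(*cur));
		cur->id = i;
		cur->next = head;
		head = cur;
	}

	/* The "safe" walk: grab @next before @cur can disappear. */
	for (cur = head; cur; cur = next) {
		next = cur->next;
		release(&head, cur);
	}
	return 0;
}
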
@@ -885,7 +889,7 @@ static void discard_ggtt_vma(struct i915_vma *vma)
 struct i915_vma *
 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
                            struct i915_gem_ww_ctx *ww,
-                           const struct i915_ggtt_view *view,
+                           const struct i915_gtt_view *view,
                            u64 size, u64 alignment, u64 flags)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -896,7 +900,7 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
        GEM_WARN_ON(!ww);
 
        if (flags & PIN_MAPPABLE &&
-           (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
+           (!view || view->type == I915_GTT_VIEW_NORMAL)) {
                /*
                 * If the required space is larger than the available
                 * aperture, we will not be able to find a slot for the
@@ -987,7 +991,7 @@ new_vma:
 
 struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
-                        const struct i915_ggtt_view *view,
+                        const struct i915_gtt_view *view,
                         u64 size, u64 alignment, u64 flags)
 {
        struct i915_gem_ww_ctx ww;
@@ -1035,7 +1039,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 
        if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
-           i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+           i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_clear_tiling_quirk(obj);
@@ -1085,14 +1089,50 @@ out:
        return err;
 }
 
+/*
+ * A single pass should suffice to release all the freed objects (along most
+ * call paths), but be a little more paranoid here: freeing the objects takes
+ * a little time, during which the RCU callbacks could have added new objects
+ * to the freed list and armed the work again.
+ */
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+       while (atomic_read(&i915->mm.free_count)) {
+               flush_work(&i915->mm.free_work);
+               flush_delayed_work(&i915->bdev.wq);
+               rcu_barrier();
+       }
+}
+
+/*
+ * Similar to the objects above (see i915_gem_drain_freed_objects()), in general
+ * we have workers that are armed by RCU and then rearm themselves in their
+ * callbacks. To be paranoid, we need to drain the workqueue a second time after
+ * waiting for the RCU grace period so that we catch work queued via RCU from
+ * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
+ * result, we assume that no more than 3 passes are required to catch all
+ * _recursive_ RCU delayed work.
+ */
+void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+       int i;
+
+       for (i = 0; i < 3; i++) {
+               flush_workqueue(i915->wq);
+               rcu_barrier();
+               i915_gem_drain_freed_objects(i915);
+       }
+
+       drain_workqueue(i915->wq);
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
        int ret;
 
        /* We need to fall back to 4K pages if the host doesn't support huge GTT. */
        if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
-               mkwrite_device_info(dev_priv)->page_sizes =
-                       I915_GTT_PAGE_SIZE_4K;
+               RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
 
        ret = i915_gem_init_userptr(dev_priv);
        if (ret)
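
The two new comments above describe the same hazard: a work item can re-arm itself through an RCU callback, so a single flush only catches work that was already queued, and the helpers therefore loop flush plus rcu_barrier() a bounded number of times (3 in i915_gem_drain_workqueue()). A toy user-space model of that reasoning, with purely illustrative names rather than driver code, is:

/* Model: each pass runs what is queued now; work that re-arms itself via
 * an RCU callback only becomes visible after the next "grace period".
 */
#include <stdio.h>

static int pending = 1;     /* work items currently queued */
static int rcu_deferred;    /* re-arms waiting on an RCU grace period */
static int rearm_left = 2;  /* how many times the work re-queues itself */

/* flush_workqueue() analogue: run everything that is queued right now. */
static void flush_workqueue_model(void)
{
	while (pending) {
		pending--;
		if (rearm_left) {   /* the item re-arms itself via call_rcu() */
			rearm_left--;
			rcu_deferred++;
		}
	}
}

/* rcu_barrier() analogue: completed grace periods queue the deferred work. */
static void rcu_barrier_model(void)
{
	pending += rcu_deferred;
	rcu_deferred = 0;
}

int main(void)
{
	for (int pass = 0; pass < 3; pass++) {
		flush_workqueue_model();
		rcu_barrier_model();
		printf("after pass %d: pending=%d\n", pass + 1, pending);
	}
	return 0;
}

With two levels of re-arming, the model still has work pending after the first two passes and only drains on the third, which matches the bounded-pass assumption stated in the comment.
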
@@ -1173,7 +1213,7 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
 
 void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
 {
-       intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);
+       intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
 
        i915_gem_suspend_late(dev_priv);
        intel_gt_driver_remove(to_gt(dev_priv));
@@ -1214,7 +1254,7 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
        i915_gem_init__mm(dev_priv);
        i915_gem_init__contexts(dev_priv);
 
-       spin_lock_init(&dev_priv->fb_tracking.lock);
+       spin_lock_init(&dev_priv->display.fb_tracking.lock);
 }
 
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)