drm/i915: Move the eviction logic to its own file.
author     Chris Wilson <chris@chris-wilson.co.uk>
           Sat, 7 Aug 2010 10:01:23 +0000 (11:01 +0100)
committer  Eric Anholt <eric@anholt.net>
           Mon, 9 Aug 2010 18:24:32 +0000 (11:24 -0700)
The eviction code is the gnarly underbelly of memory management, and is
clearer if kept separated from the normal domain management in GEM.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Eric Anholt <eric@anholt.net>
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c [new file with mode: 0644]

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index da78f2c0d90949b57b067d97a038a324337bb65e..384fd453579631f18dcf5966f0609d2d835b3134 100644
@@ -8,6 +8,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
           i915_suspend.o \
          i915_gem.o \
          i915_gem_debug.o \
+         i915_gem_evict.o \
          i915_gem_tiling.o \
          i915_trace_points.o \
          intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index def6ee0a352457f1019458b977c83850f27e9c58..12c8f47f984b99c47d900396c40f9ce5e664289d 100644
@@ -982,6 +982,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end);
+int i915_gpu_idle(struct drm_device *dev);
 int i915_gem_idle(struct drm_device *dev);
 uint32_t i915_add_request(struct drm_device *dev,
                struct drm_file *file_priv,
@@ -1007,6 +1008,11 @@ int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
 
+/* i915_gem_evict.c */
+int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
+int i915_gem_evict_everything(struct drm_device *dev);
+int i915_gem_evict_inactive(struct drm_device *dev);
+
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
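
The hunk above is the new public surface: i915_gem_evict_something() to make room for a single buffer, i915_gem_evict_inactive() to clear the inactive list, and i915_gem_evict_everything() for a full flush-and-evict. A minimal caller sketch, assuming a hypothetical try_to_bind() helper in place of the real GTT binding code (which this patch does not touch):

/* Sketch only: retry a GTT allocation by evicting on -ENOSPC.
 * try_to_bind() is a hypothetical stand-in for the binding path.
 */
static int example_bind_with_eviction(struct drm_device *dev,
				      struct drm_gem_object *obj,
				      unsigned alignment)
{
	int ret;

	for (;;) {
		ret = try_to_bind(dev, obj, alignment);
		if (ret != -ENOSPC)
			return ret;

		/* No free GTT hole of the right size/alignment: evict
		 * the smallest suitable buffer (or escalate inside
		 * i915_gem_evict_something()) and try again.
		 */
		ret = i915_gem_evict_something(dev, obj->size, alignment);
		if (ret)
			return ret;
	}
}

In the driver itself a retry loop of this shape sits in the GTT binding path; the sketch only illustrates the calling convention of the exported entry points.
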
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 45b998218d0c70a111905f7e5e9cb933d224891f..b5a7b00264a6b1109336d17d58ce2849c76164b2 100644
@@ -49,9 +49,6 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                           unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev, int min_size,
-                                   unsigned alignment);
-static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
@@ -1885,19 +1882,6 @@ i915_gem_flush(struct drm_device *dev,
                                flush_domains);
 }
 
-static void
-i915_gem_flush_ring(struct drm_device *dev,
-              uint32_t invalidate_domains,
-              uint32_t flush_domains,
-              struct intel_ring_buffer *ring)
-{
-       if (flush_domains & I915_GEM_DOMAIN_CPU)
-               drm_agp_chipset_flush(dev);
-       ring->flush(dev, ring,
-                       invalidate_domains,
-                       flush_domains);
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -2008,53 +1992,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
        return ret;
 }
 
-static int
-i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
-                                     unsigned alignment, int *found)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       struct drm_gem_object *best = NULL;
-       struct drm_gem_object *first = NULL;
-
-       /* Try to find the smallest clean object */
-       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-               struct drm_gem_object *obj = &obj_priv->base;
-               if (obj->size >= min_size) {
-                       if ((!obj_priv->dirty ||
-                            i915_gem_object_is_purgeable(obj_priv)) &&
-                           (!best || obj->size < best->size)) {
-                               best = obj;
-                               if (best->size == min_size)
-                                       break;
-                       }
-                       if (!first)
-                           first = obj;
-               }
-       }
-
-       obj = best ? best : first;
-
-       if (!obj) {
-               *found = 0;
-               return 0;
-       }
-
-       *found = 1;
-
-#if WATCH_LRU
-       DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-       obj_priv = to_intel_bo(obj);
-       BUG_ON(obj_priv->pin_count != 0);
-       BUG_ON(obj_priv->active);
-
-       /* Wait on the rendering and unbind the buffer. */
-       return i915_gem_object_unbind(obj);
-}
-
-static int
+int
 i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2095,147 +2033,6 @@ i915_gpu_idle(struct drm_device *dev)
        return ret;
 }
 
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret;
-       bool lists_empty;
-
-       spin_lock(&dev_priv->mm.active_list_lock);
-       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-                      list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->render_ring.active_list) &&
-                      (!HAS_BSD(dev)
-                       || list_empty(&dev_priv->bsd_ring.active_list)));
-       spin_unlock(&dev_priv->mm.active_list_lock);
-
-       if (lists_empty)
-               return -ENOSPC;
-
-       /* Flush everything (on to the inactive lists) and evict */
-       ret = i915_gpu_idle(dev);
-       if (ret)
-               return ret;
-
-       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
-       ret = i915_gem_evict_from_inactive_list(dev);
-       if (ret)
-               return ret;
-
-       spin_lock(&dev_priv->mm.active_list_lock);
-       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-                      list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->render_ring.active_list) &&
-                      (!HAS_BSD(dev)
-                       || list_empty(&dev_priv->bsd_ring.active_list)));
-       spin_unlock(&dev_priv->mm.active_list_lock);
-       BUG_ON(!lists_empty);
-
-       return 0;
-}
-
-static int
-i915_gem_evict_something(struct drm_device *dev,
-                        int min_size, unsigned alignment)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret, found;
-
-       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
-       struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
-       for (;;) {
-               i915_gem_retire_requests(dev);
-
-               /* If there's an inactive buffer available now, grab it
-                * and be done.
-                */
-               ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
-                                                           alignment,
-                                                           &found);
-               if (found)
-                       return ret;
-
-               /* If we didn't get anything, but the ring is still processing
-                * things, wait for the next to finish and hopefully leave us
-                * a buffer to evict.
-                */
-               if (!list_empty(&render_ring->request_list)) {
-                       struct drm_i915_gem_request *request;
-
-                       request = list_first_entry(&render_ring->request_list,
-                                                  struct drm_i915_gem_request,
-                                                  list);
-
-                       ret = i915_wait_request(dev,
-                                       request->seqno, request->ring);
-                       if (ret)
-                               return ret;
-
-                       continue;
-               }
-
-               if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
-                       struct drm_i915_gem_request *request;
-
-                       request = list_first_entry(&bsd_ring->request_list,
-                                                  struct drm_i915_gem_request,
-                                                  list);
-
-                       ret = i915_wait_request(dev,
-                                       request->seqno, request->ring);
-                       if (ret)
-                               return ret;
-
-                       continue;
-               }
-
-               /* If we didn't have anything on the request list but there
-                * are buffers awaiting a flush, emit one and try again.
-                * When we wait on it, those buffers waiting for that flush
-                * will get moved to inactive.
-                */
-               if (!list_empty(&dev_priv->mm.flushing_list)) {
-                       struct drm_gem_object *obj = NULL;
-                       struct drm_i915_gem_object *obj_priv;
-
-                       /* Find an object that we can immediately reuse */
-                       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-                               obj = &obj_priv->base;
-                               if (obj->size >= min_size)
-                                       break;
-
-                               obj = NULL;
-                       }
-
-                       if (obj != NULL) {
-                               uint32_t seqno;
-
-                               i915_gem_flush_ring(dev,
-                                              obj->write_domain,
-                                              obj->write_domain,
-                                              obj_priv->ring);
-                               seqno = i915_add_request(dev, NULL,
-                                               obj->write_domain,
-                                               obj_priv->ring);
-                               if (seqno == 0)
-                                       return -ENOMEM;
-                               continue;
-                       }
-               }
-
-               /* If we didn't do any of the above, there's no single buffer
-                * large enough to swap out for the new one, so just evict
-                * everything and start again. (This should be rare.)
-                */
-               if (!list_empty (&dev_priv->mm.inactive_list))
-                       return i915_gem_evict_from_inactive_list(dev);
-               else
-                       return i915_gem_evict_everything(dev);
-       }
-}
-
 int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
                          gfp_t gfpmask)
@@ -4548,30 +4345,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
        i915_gem_free_object_tail(obj);
 }
 
-/** Unbinds all inactive objects. */
-static int
-i915_gem_evict_from_inactive_list(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       while (!list_empty(&dev_priv->mm.inactive_list)) {
-               struct drm_gem_object *obj;
-               int ret;
-
-               obj = &list_first_entry(&dev_priv->mm.inactive_list,
-                                       struct drm_i915_gem_object,
-                                       list)->base;
-
-               ret = i915_gem_object_unbind(obj);
-               if (ret != 0) {
-                       DRM_ERROR("Error unbinding object: %d\n", ret);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -4596,7 +4369,7 @@ i915_gem_idle(struct drm_device *dev)
 
        /* Under UMS, be paranoid and evict. */
        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = i915_gem_evict_from_inactive_list(dev);
+               ret = i915_gem_evict_inactive(dev);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
new file mode 100644
index 0000000..479e450
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uuk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+       return obj_priv->madv == I915_MADV_DONTNEED;
+}
+
+static int
+i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
+                                     unsigned alignment, int *found)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *best = NULL;
+       struct drm_gem_object *first = NULL;
+
+       /* Try to find the smallest clean object */
+       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+               struct drm_gem_object *obj = &obj_priv->base;
+               if (obj->size >= min_size) {
+                       if ((!obj_priv->dirty ||
+                            i915_gem_object_is_purgeable(obj_priv)) &&
+                           (!best || obj->size < best->size)) {
+                               best = obj;
+                               if (best->size == min_size)
+                                       break;
+                       }
+                       if (!first)
+                           first = obj;
+               }
+       }
+
+       obj = best ? best : first;
+
+       if (!obj) {
+               *found = 0;
+               return 0;
+       }
+
+       *found = 1;
+
+#if WATCH_LRU
+       DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+       obj_priv = to_intel_bo(obj);
+       BUG_ON(obj_priv->pin_count != 0);
+       BUG_ON(obj_priv->active);
+
+       /* Wait on the rendering and unbind the buffer. */
+       return i915_gem_object_unbind(obj);
+}
+
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+              uint32_t invalidate_domains,
+              uint32_t flush_domains,
+              struct intel_ring_buffer *ring)
+{
+       if (flush_domains & I915_GEM_DOMAIN_CPU)
+               drm_agp_chipset_flush(dev);
+       ring->flush(dev, ring,
+                       invalidate_domains,
+                       flush_domains);
+}
+
+int
+i915_gem_evict_something(struct drm_device *dev,
+                        int min_size, unsigned alignment)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret, found;
+
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+       struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
+       for (;;) {
+               i915_gem_retire_requests(dev);
+
+               /* If there's an inactive buffer available now, grab it
+                * and be done.
+                */
+               ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
+                                                           alignment,
+                                                           &found);
+               if (found)
+                       return ret;
+
+               /* If we didn't get anything, but the ring is still processing
+                * things, wait for the next to finish and hopefully leave us
+                * a buffer to evict.
+                */
+               if (!list_empty(&render_ring->request_list)) {
+                       struct drm_i915_gem_request *request;
+
+                       request = list_first_entry(&render_ring->request_list,
+                                                  struct drm_i915_gem_request,
+                                                  list);
+
+                       ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
+                       if (ret)
+                               return ret;
+
+                       continue;
+               }
+
+               if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
+                       struct drm_i915_gem_request *request;
+
+                       request = list_first_entry(&bsd_ring->request_list,
+                                                  struct drm_i915_gem_request,
+                                                  list);
+
+                       ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
+                       if (ret)
+                               return ret;
+
+                       continue;
+               }
+
+               /* If we didn't have anything on the request list but there
+                * are buffers awaiting a flush, emit one and try again.
+                * When we wait on it, those buffers waiting for that flush
+                * will get moved to inactive.
+                */
+               if (!list_empty(&dev_priv->mm.flushing_list)) {
+                       struct drm_gem_object *obj = NULL;
+                       struct drm_i915_gem_object *obj_priv;
+
+                       /* Find an object that we can immediately reuse */
+                       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+                               obj = &obj_priv->base;
+                               if (obj->size >= min_size)
+                                       break;
+
+                               obj = NULL;
+                       }
+
+                       if (obj != NULL) {
+                               uint32_t seqno;
+
+                               i915_gem_flush_ring(dev,
+                                              obj->write_domain,
+                                              obj->write_domain,
+                                              obj_priv->ring);
+                               seqno = i915_add_request(dev, NULL,
+                                               obj->write_domain,
+                                               obj_priv->ring);
+                               if (seqno == 0)
+                                       return -ENOMEM;
+                               continue;
+                       }
+               }
+
+               /* If we didn't do any of the above, there's no single buffer
+                * large enough to swap out for the new one, so just evict
+                * everything and start again. (This should be rare.)
+                */
+               if (!list_empty(&dev_priv->mm.inactive_list))
+                       return i915_gem_evict_inactive(dev);
+               else
+                       return i915_gem_evict_everything(dev);
+       }
+}
+
+int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+       bool lists_empty;
+
+       spin_lock(&dev_priv->mm.active_list_lock);
+       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+                      list_empty(&dev_priv->mm.flushing_list) &&
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev)
+                       || list_empty(&dev_priv->bsd_ring.active_list)));
+       spin_unlock(&dev_priv->mm.active_list_lock);
+
+       if (lists_empty)
+               return -ENOSPC;
+
+       /* Flush everything (on to the inactive lists) and evict */
+       ret = i915_gpu_idle(dev);
+       if (ret)
+               return ret;
+
+       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+       ret = i915_gem_evict_inactive(dev);
+       if (ret)
+               return ret;
+
+       spin_lock(&dev_priv->mm.active_list_lock);
+       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+                      list_empty(&dev_priv->mm.flushing_list) &&
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev)
+                       || list_empty(&dev_priv->bsd_ring.active_list)));
+       spin_unlock(&dev_priv->mm.active_list_lock);
+       BUG_ON(!lists_empty);
+
+       return 0;
+}
+
+/** Unbinds all inactive objects. */
+int
+i915_gem_evict_inactive(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       while (!list_empty(&dev_priv->mm.inactive_list)) {
+               struct drm_gem_object *obj;
+               int ret;
+
+               obj = &list_first_entry(&dev_priv->mm.inactive_list,
+                                       struct drm_i915_gem_object,
+                                       list)->base;
+
+               ret = i915_gem_object_unbind(obj);
+               if (ret != 0) {
+                       DRM_ERROR("Error unbinding object: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
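
One behavioural detail worth noting from the code above: i915_gem_evict_everything() returns -ENOSPC when the inactive, flushing and active lists are already empty, i.e. when there is nothing left to evict. A hypothetical caller that wants "drain the GTT, succeeding if it is already empty" therefore has to treat that value specially — a sketch, not code from this patch:

/* Illustrative helper only: force the GTT empty.  Assumes the caller
 * holds dev->struct_mutex, per the usual GEM locking convention.
 */
static int example_drain_gtt(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_evict_everything(dev);
	if (ret == -ENOSPC)	/* nothing was bound in the first place */
		return 0;

	return ret;
}
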