drm: Micro-optimise drm_mm_for_each_node_in_range()
Author: Chris Wilson <chris@chris-wilson.co.uk>
Sat, 4 Feb 2017 11:19:13 +0000 (11:19 +0000)
Committer: Daniel Vetter <daniel.vetter@ffwll.ch>
Mon, 6 Feb 2017 15:57:37 +0000 (16:57 +0100)
As we require valid start/end parameters, we can replace the initial
potential NULL with a pointer to the drm_mm.head_node and so reduce the
test on every iteration from a NULL + address comparison to just an
address comparison.

add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-26 (-26)
function                                     old     new   delta
i915_gem_evict_for_node                      719     693     -26

(No other users outside of the test harness.)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20170204111913.12416-1-chris@chris-wilson.co.uk
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/selftests/test-drm_mm.c
include/drm/drm_mm.h

index 8bfb0b327267b461a5bb41cc5befe385f9637824..f794089d30ac21de22894774f13d4d2871213573 100644 (file)
@@ -170,7 +170,7 @@ struct drm_mm_node *
 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
 {
        return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
-                                              start, last);
+                                              start, last) ?: (struct drm_mm_node *)&mm->head_node;
 }
 EXPORT_SYMBOL(__drm_mm_interval_first);
 
index 1e71bc182ca96a6bda444650f3f30db15ce95df8..2958f596081ee819399e6c1d068648c7ba154657 100644 (file)
@@ -839,16 +839,18 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
                n++;
        }
 
-       drm_mm_for_each_node_in_range(node, mm, 0, start) {
-               if (node) {
+       if (start > 0) {
+               node = __drm_mm_interval_first(mm, 0, start - 1);
+               if (node->allocated) {
                        pr_err("node before start: node=%llx+%llu, start=%llx\n",
                               node->start, node->size, start);
                        return false;
                }
        }
 
-       drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) {
-               if (node) {
+       if (end < U64_MAX) {
+               node = __drm_mm_interval_first(mm, end, U64_MAX);
+               if (node->allocated) {
                        pr_err("node after end: node=%llx+%llu, end=%llx\n",
                               node->start, node->size, end);
                        return false;
index d81b0ba9921fb5d855c28ed672e0352a856214c9..f262da18011723b1f504321838d575a7274822be 100644 (file)
@@ -459,10 +459,13 @@ __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
  * but using the internal interval tree to accelerate the search for the
  * starting node, and so not safe against removal of elements. It assumes
  * that @end is within (or is the upper limit of) the drm_mm allocator.
+ * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
+ * over the special _unallocated_ &drm_mm.head_node, and may even continue
+ * indefinitely.
  */
 #define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)    \
        for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
-            node__ && node__->start < (end__);                         \
+            node__->start < (end__);                                   \
             node__ = list_next_entry(node__, node_list))
 
 void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,