iommu/iova: Optimise attempts to allocate iova from 32bit address range
author    Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
          Wed, 5 Sep 2018 04:27:36 +0000 (09:57 +0530)
committer Joerg Roedel <jroedel@suse.de>
          Tue, 25 Sep 2018 08:18:27 +0000 (10:18 +0200)
As an optimisation for PCI devices, an allocation is always first
attempted from the SAC (32-bit) address range. When that range has no
free space left, this leads to repeated, pointless rbtree walks. Fix
this by tracking the size of the most recently failed 32-bit
allocation and skipping further attempts unless the requested size is
smaller than that failed size. The tracked size is reset whenever an
IOVA is freed back into the 32-bit range.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/iova.c
include/linux/iova.h

index 83fe2621effe72bc1cbeecd80df4030235d87328..f8d3ba2475237f4477994a7c8b8b1cae0cfe3310 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -56,6 +56,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        iovad->granule = granule;
        iovad->start_pfn = start_pfn;
        iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
+       iovad->max32_alloc_size = iovad->dma_32bit_pfn;
        iovad->flush_cb = NULL;
        iovad->fq = NULL;
        iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
@@ -139,8 +140,10 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 
        cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
        if (free->pfn_hi < iovad->dma_32bit_pfn &&
-           free->pfn_lo >= cached_iova->pfn_lo)
+           free->pfn_lo >= cached_iova->pfn_lo) {
                iovad->cached32_node = rb_next(&free->node);
+               iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+       }
 
        cached_iova = rb_entry(iovad->cached_node, struct iova, node);
        if (free->pfn_lo >= cached_iova->pfn_lo)
@@ -190,6 +193,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+       if (limit_pfn <= iovad->dma_32bit_pfn &&
+                       size >= iovad->max32_alloc_size)
+               goto iova32_full;
+
        curr = __get_cached_rbnode(iovad, limit_pfn);
        curr_iova = rb_entry(curr, struct iova, node);
        do {
@@ -200,10 +207,8 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                curr_iova = rb_entry(curr, struct iova, node);
        } while (curr && new_pfn <= curr_iova->pfn_hi);
 
-       if (limit_pfn < size || new_pfn < iovad->start_pfn) {
-               spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-               return -ENOMEM;
-       }
+       if (limit_pfn < size || new_pfn < iovad->start_pfn)
+               goto iova32_full;
 
        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = new_pfn;
@@ -214,9 +219,12 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
        __cached_rbnode_insert_update(iovad, new);
 
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
        return 0;
+
+iova32_full:
+       iovad->max32_alloc_size = size;
+       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+       return -ENOMEM;
 }
 
 static struct kmem_cache *iova_cache;
index 928442dda565f147b501dca93601731a92581b2d..0b93bf96693ef6f3ea8b6a30315a24fe548443c6 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -75,6 +75,7 @@ struct iova_domain {
        unsigned long   granule;        /* pfn granularity for this domain */
        unsigned long   start_pfn;      /* Lower limit for this domain */
        unsigned long   dma_32bit_pfn;
+       unsigned long   max32_alloc_size; /* Size of last failed allocation */
        struct iova     anchor;         /* rbtree lookup anchor */
        struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];  /* IOVA range caches */