iommu/dma: Use NUMA aware memory allocations in __iommu_dma_alloc_pages()
author Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
Fri, 30 Nov 2018 11:14:00 +0000 (19:14 +0800)
committer Joerg Roedel <jroedel@suse.de>
Tue, 11 Dec 2018 09:29:39 +0000 (10:29 +0100)
Change function __iommu_dma_alloc_pages() to allocate pages for DMA from
the respective device's NUMA node. The ternary operator that would
otherwise be needed in the alloc_pages_node() call is tidied up into a
plain conditional as part of this change.
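
For illustration only, a hypothetical helper (not part of this patch)
sketching the resulting allocation pattern; dev_to_node() may return
NUMA_NO_NODE, in which case alloc_pages_node() falls back to the
current node:

	#include <linux/device.h>
	#include <linux/gfp.h>

	/* Hypothetical helper: allocate one block of pages near the device. */
	static struct page *alloc_near_dev(struct device *dev, gfp_t gfp,
					   unsigned int order,
					   bool smaller_orders_left)
	{
		int nid = dev_to_node(dev);	/* NUMA_NO_NODE if unknown */
		gfp_t alloc_flags = gfp;

		/* Higher orders are opportunistic: fail fast, retry smaller. */
		if (smaller_orders_left)
			alloc_flags |= __GFP_NORETRY;
		return alloc_pages_node(nid, alloc_flags, order);
	}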

The motivation for this change is to have a policy for page allocation
consistent with direct DMA mapping, which attempts to allocate pages local
to the device, as mentioned in [1].

In addition, a marginal performance improvement has been observed for
certain workloads: running tcrypt with the HiSilicon crypto engine showed
a 0.9% average throughput improvement with this patch applied.

We also include a modification to use kvzalloc() in place of the
open-coded kzalloc()/vzalloc() combination.
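
For reference, kvzalloc() makes this decision internally, attempting
kmalloc() first and falling back to vmalloc() for larger allocations,
so the size check no longer needs to be open-coded:

	/* Before: open-coded choice between kzalloc() and vzalloc() */
	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);

	/* After: kvzalloc() picks the allocator and zeroes the memory */
	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);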

[1] https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg1692998.html

Signed-off-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
[JPG: Added kvzalloc(), drop pages ** being device local, remove ternary operator, update message]
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/dma-iommu.c

index d1b04753b2040a95868f7a17ab9b919aad842314..4afb1a8a8da8a1a6b2fcd53034cc02cad6fc962d 100644 (file)
@@ -449,20 +449,17 @@ static void __iommu_dma_free_pages(struct page **pages, int count)
        kvfree(pages);
 }
 
-static struct page **__iommu_dma_alloc_pages(unsigned int count,
-               unsigned long order_mask, gfp_t gfp)
+static struct page **__iommu_dma_alloc_pages(struct device *dev,
+               unsigned int count, unsigned long order_mask, gfp_t gfp)
 {
        struct page **pages;
-       unsigned int i = 0, array_size = count * sizeof(*pages);
+       unsigned int i = 0, nid = dev_to_node(dev);
 
        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;
 
-       if (array_size <= PAGE_SIZE)
-               pages = kzalloc(array_size, GFP_KERNEL);
-       else
-               pages = vzalloc(array_size);
+       pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;
 
@@ -481,10 +478,12 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);
+                       gfp_t alloc_flags = gfp;
 
                        order_size = 1U << order;
-                       page = alloc_pages((order_mask - order_size) ?
-                                          gfp | __GFP_NORETRY : gfp, order);
+                       if (order_mask > order_size)
+                               alloc_flags |= __GFP_NORETRY;
+                       page = alloc_pages_node(nid, alloc_flags, order);
                        if (!page)
                                continue;
                        if (!order)
@@ -569,7 +568,8 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
                alloc_sizes = min_size;
 
        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
+       pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
+                                       gfp);
        if (!pages)
                return NULL;