mm/hmm: add a helper function that faults pages and maps them to a device
author Jérôme Glisse <jglisse@redhat.com>
Tue, 14 May 2019 00:20:28 +0000 (17:20 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 14 May 2019 16:47:48 +0000 (09:47 -0700)
This is an all-in-one helper that faults pages in a range and maps them to
a device, so that device drivers do not each have to re-implement this
common pattern.

This is taken from ODP RDMA in preparation for the ODP RDMA conversion.  It
will be used by nouveau and other drivers.
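
For illustration, a minimal driver-side sketch of the intended usage,
assuming the range has already been set up with hmm_range_register() and
that mmap_sem is held as for hmm_range_fault(); my_dev and the device
programming step are hypothetical, not part of this patch:

	long ret;

	ret = hmm_range_dma_map(range, my_dev, daddrs, true);
	if (ret < 0)
		return ret;	/* -EBUSY, -EAGAIN, -EFAULT, ... */

	/* ... program the device with daddrs[0..ret-1] ... */

	/* Tear down, dirtying pages that were mapped for write. */
	ret = hmm_range_dma_unmap(range, NULL, my_dev, daddrs, true);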

[jglisse@redhat.com: Was using wrong field and wrong enum]
Link: http://lkml.kernel.org/r/20190409175340.26614-1-jglisse@redhat.com
Link: http://lkml.kernel.org/r/20190403193318.16478-12-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/hmm.h
mm/hmm.c

index a79fcc6681f5c97a2de877460dc6569a81043528..f81fe2c0f3432bc706f13b95ab602d29c38e2f3b 100644 (file)
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -474,6 +474,15 @@ int hmm_range_register(struct hmm_range *range,
 void hmm_range_unregister(struct hmm_range *range);
 long hmm_range_snapshot(struct hmm_range *range);
 long hmm_range_fault(struct hmm_range *range, bool block);
+long hmm_range_dma_map(struct hmm_range *range,
+                      struct device *device,
+                      dma_addr_t *daddrs,
+                      bool block);
+long hmm_range_dma_unmap(struct hmm_range *range,
+                        struct vm_area_struct *vma,
+                        struct device *device,
+                        dma_addr_t *daddrs,
+                        bool dirty);
 
 /*
  * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
index b1c9b05bf26f4192acbf2d2e3f8fbcdf9904ea06..95fa7abb9d6727c21385e7496de1837c38ea58d6 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -30,6 +30,7 @@
 #include <linux/hugetlb.h>
 #include <linux/memremap.h>
 #include <linux/jump_label.h>
+#include <linux/dma-mapping.h>
 #include <linux/mmu_notifier.h>
 #include <linux/memory_hotplug.h>
 
@@ -1182,6 +1183,157 @@ long hmm_range_fault(struct hmm_range *range, bool block)
        return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
 }
 EXPORT_SYMBOL(hmm_range_fault);
+
+/**
+ * hmm_range_dma_map() - hmm_range_fault() and dma map pages all in one.
+ * @range: range being faulted
+ * @device: device against which to dma map pages
+ * @daddrs: dma addresses of mapped pages
+ * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
+ * Returns: number of pages mapped on success, -EAGAIN if mmap_sem has been
+ *          dropped and you need to try again, some other error value otherwise
+ *
+ * Note: same usage pattern as hmm_range_fault().
+ */
+long hmm_range_dma_map(struct hmm_range *range,
+                      struct device *device,
+                      dma_addr_t *daddrs,
+                      bool block)
+{
+       unsigned long i, npages, mapped;
+       long ret;
+
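+       /* Fault the pages first; zero pages faulted is reported as -EBUSY. */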
+       ret = hmm_range_fault(range, block);
+       if (ret <= 0)
+               return ret ? ret : -EBUSY;
+
+       npages = (range->end - range->start) >> PAGE_SHIFT;
+       for (i = 0, mapped = 0; i < npages; ++i) {
+               enum dma_data_direction dir = DMA_TO_DEVICE;
+               struct page *page;
+
+               /*
+                * FIXME: the DMA API needs to be updated to provide an invalid
+                * DMA address value instead of a function to test the dma
+                * address value; this would remove a lot of duplicated code
+                * across many architectures.
+                *
+                * For now setting it to 0 here is good enough as the pfns[]
+                * value is what is used to check what is valid and what isn't.
+                */
+               daddrs[i] = 0;
+
+               page = hmm_pfn_to_page(range, range->pfns[i]);
+               if (page == NULL)
+                       continue;
+
+               /* Check if range is being invalidated */
+               if (!range->valid) {
+                       ret = -EBUSY;
+                       goto unmap;
+               }
+
+               /* If it is read and write then map bi-directional. */
+               if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
+                       dir = DMA_BIDIRECTIONAL;
+
+               daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
+               if (dma_mapping_error(device, daddrs[i])) {
+                       ret = -EFAULT;
+                       goto unmap;
+               }
+
+               mapped++;
+       }
+
+       return mapped;
+
+unmap:
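+       /*
+        * Unwind: unmap the pages mapped before the failure; the loop stops
+        * early once every mapped page has been released (mapped reaches 0).
+        */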
+       for (npages = i, i = 0; (i < npages) && mapped; ++i) {
+               enum dma_data_direction dir = DMA_TO_DEVICE;
+               struct page *page;
+
+               page = hmm_pfn_to_page(range, range->pfns[i]);
+               if (page == NULL)
+                       continue;
+
+               if (dma_mapping_error(device, daddrs[i]))
+                       continue;
+
+               /* If it is read and write then map bi-directional. */
+               if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
+                       dir = DMA_BIDIRECTIONAL;
+
+               dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
+               mapped--;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(hmm_range_dma_map);
+
+/**
+ * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
+ * @range: range being unmapped
+ * @vma: the vma against which the range was mapped (optional)
+ * @device: device against which the dma map was done
+ * @daddrs: dma addresses of mapped pages
+ * @dirty: dirty page if it had the write flag set
+ * Returns: number of pages unmapped on success, -EINVAL otherwise
+ *
+ * Note that the caller MUST abide by mmu notifiers, or use the HMM mirror and
+ * abide by the sync_cpu_device_pagetables() callback, so that it is safe here
+ * to call set_page_dirty(). The caller must also take appropriate locks to
+ * prevent a concurrent mmu notifier or sync_cpu_device_pagetables() from
+ * making progress.
+ */
+long hmm_range_dma_unmap(struct hmm_range *range,
+                        struct vm_area_struct *vma,
+                        struct device *device,
+                        dma_addr_t *daddrs,
+                        bool dirty)
+{
+       unsigned long i, npages;
+       long cpages = 0;
+
+       /* Sanity check. */
+       if (range->end <= range->start)
+               return -EINVAL;
+       if (!daddrs)
+               return -EINVAL;
+       if (!range->pfns)
+               return -EINVAL;
+
+       npages = (range->end - range->start) >> PAGE_SHIFT;
+       for (i = 0; i < npages; ++i) {
+               enum dma_data_direction dir = DMA_TO_DEVICE;
+               struct page *page;
+
+               page = hmm_pfn_to_page(range, range->pfns[i]);
+               if (page == NULL)
+                       continue;
+
+               /* If it is read and write then unmap bi-directional. */
+               if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
+                       dir = DMA_BIDIRECTIONAL;
+
+                       /*
+                        * See comments in function description on why it is
+                        * safe here to call set_page_dirty()
+                        */
+                       if (dirty)
+                               set_page_dirty(page);
+               }
+
+               /* Unmap and clear pfns/dma address */
+               dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
+               range->pfns[i] = range->values[HMM_PFN_NONE];
+               /* FIXME see comments in hmm_range_dma_map() */
+               daddrs[i] = 0;
+               cpages++;
+       }
+
+       return cpages;
+}
+EXPORT_SYMBOL(hmm_range_dma_unmap);
 #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
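
As a usage note: pages that could not be faulted leave daddrs[i] set to 0
and hmm_pfn_to_page() returns NULL for them, so a driver walking the result
might look like the following sketch (my_dev_program_pte() is a hypothetical
driver callback, not part of this patch):

	unsigned long i, npages;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		struct page *page;

		page = hmm_pfn_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;	/* hole: daddrs[i] was left as 0 */

		/* daddrs[i] holds a valid dma address for this page. */
		my_dev_program_pte(my_dev, i, daddrs[i]);
	}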