drm/ttm: Convert vm callbacks to helpers
author Thomas Hellstrom <thellstrom@vmware.com>
Wed, 25 Sep 2019 13:11:23 +0000 (15:11 +0200)
committer Christian König <christian.koenig@amd.com>
Mon, 4 Nov 2019 12:02:30 +0000 (13:02 +0100)
The default TTM fault handler may not be sufficient for all drivers
(vmwgfx needs to do some bookkeeping, control the write protection and
also restrict the number of prefaults).

Also make it possible to replicate the ttm_bo_vm_reserve() functionality
in, for example, mkwrite handlers.

So turn the TTM vm code into helpers: ttm_bo_vm_fault_reserved(),
ttm_bo_vm_open(), ttm_bo_vm_close() and ttm_bo_vm_reserve(). Also provide
a default TTM fault handler for other drivers to use.
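
For illustration only (not part of this patch), a driver-specific fault
handler built on the new helpers could look roughly like the sketch below.
The name my_driver_vm_fault() and the single-page prefault policy are
hypothetical; the helper calls mirror the default handler introduced here:

#include <linux/mm.h>
#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

/* Hypothetical driver fault handler built on the new TTM helpers. */
static vm_fault_t my_driver_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	pgprot_t prot;
	vm_fault_t ret;

	/* Reserve the bo; may return VM_FAULT_RETRY or VM_FAULT_NOPAGE. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Driver-specific bookkeeping would go here (hypothetical). */

	/*
	 * Example policy: prefault a single page instead of
	 * TTM_BO_VM_NUM_PREFAULT, using the vma's default protection.
	 */
	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);
	return ret;
}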

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/332900/?series=67217&rev=1
Signed-off-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/ttm/ttm_bo_vm.c
include/drm/ttm/ttm_bo_api.h

index 2fa226c61c6f32e8a2cf8306133f32b9777bc2a1..11863fbdd5d690f5b947e145d937c76a72beb22d 100644 (file)
@@ -42,8 +42,6 @@
 #include <linux/uaccess.h>
 #include <linux/mem_encrypt.h>
 
-#define TTM_BO_VM_NUM_PREFAULT 16
-
 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                struct vm_fault *vmf)
 {
@@ -106,24 +104,30 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                + page_offset;
 }
 
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+/**
+ * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
+ * @bo: The buffer object
+ * @vmf: The fault structure handed to the callback
+ *
+ * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
+ * during long waits, and after the wait the callback will be restarted. This
+ * is to allow other threads using the same virtual memory space concurrent
+ * access to map(), unmap() completely unrelated buffer objects. TTM buffer
+ * object reservations sometimes wait for GPU and should therefore be
+ * considered long waits. This function reserves the buffer object interruptibly
+ * taking this into account. Starvation is avoided by the vm system not
+ * allowing too many repeated restarts.
+ * This function is intended to be used in customized fault() and _mkwrite()
+ * handlers.
+ *
+ * Return:
+ *    0 on success and the bo was reserved.
+ *    VM_FAULT_RETRY if blocking wait.
+ *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
+ */
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+                            struct vm_fault *vmf)
 {
-       struct vm_area_struct *vma = vmf->vma;
-       struct ttm_buffer_object *bo = vma->vm_private_data;
-       struct ttm_bo_device *bdev = bo->bdev;
-       unsigned long page_offset;
-       unsigned long page_last;
-       unsigned long pfn;
-       struct ttm_tt *ttm = NULL;
-       struct page *page;
-       int err;
-       int i;
-       vm_fault_t ret = VM_FAULT_NOPAGE;
-       unsigned long address = vmf->address;
-       struct ttm_mem_type_manager *man =
-               &bdev->man[bo->mem.mem_type];
-       struct vm_area_struct cvma;
-
        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
@@ -150,14 +154,54 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                return VM_FAULT_NOPAGE;
        }
 
+       return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vm_reserve);
+
+/**
+ * ttm_bo_vm_fault_reserved - TTM fault helper
+ * @vmf: The struct vm_fault given as argument to the fault callback
+ * @prot: The page protection to be used for this memory area.
+ * @num_prefault: Maximum number of prefault pages. The caller may want to
+ * specify this based on madvise settings and the size of the GPU object
+ * backed by the memory.
+ *
+ * This function inserts one or more page table entries pointing to the
+ * memory backing the buffer object, and then returns a return code
+ * instructing the caller to retry the page access.
+ *
+ * Return:
+ *   VM_FAULT_NOPAGE on success or pending signal
+ *   VM_FAULT_SIGBUS on unspecified error
+ *   VM_FAULT_OOM on out-of-memory
+ *   VM_FAULT_RETRY if retryable wait
+ */
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+                                   pgprot_t prot,
+                                   pgoff_t num_prefault)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct vm_area_struct cvma = *vma;
+       struct ttm_buffer_object *bo = vma->vm_private_data;
+       struct ttm_bo_device *bdev = bo->bdev;
+       unsigned long page_offset;
+       unsigned long page_last;
+       unsigned long pfn;
+       struct ttm_tt *ttm = NULL;
+       struct page *page;
+       int err;
+       pgoff_t i;
+       vm_fault_t ret = VM_FAULT_NOPAGE;
+       unsigned long address = vmf->address;
+       struct ttm_mem_type_manager *man =
+               &bdev->man[bo->mem.mem_type];
+
        /*
         * Refuse to fault imported pages. This should be handled
         * (if at all) by redirecting mmap to the exporter.
         */
-       if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
-               ret = VM_FAULT_SIGBUS;
-               goto out_unlock;
-       }
+       if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
+               return VM_FAULT_SIGBUS;
 
        if (bdev->driver->fault_reserve_notify) {
                struct dma_fence *moving = dma_fence_get(bo->moving);
@@ -168,11 +212,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
-                       ret = VM_FAULT_NOPAGE;
-                       goto out_unlock;
+                       return VM_FAULT_NOPAGE;
                default:
-                       ret = VM_FAULT_SIGBUS;
-                       goto out_unlock;
+                       return VM_FAULT_SIGBUS;
                }
 
                if (bo->moving != moving) {
@@ -188,21 +230,12 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
         * move.
         */
        ret = ttm_bo_vm_fault_idle(bo, vmf);
-       if (unlikely(ret != 0)) {
-               if (ret == VM_FAULT_RETRY &&
-                   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
-                       /* The BO has already been unreserved. */
-                       return ret;
-               }
-
-               goto out_unlock;
-       }
+       if (unlikely(ret != 0))
+               return ret;
 
        err = ttm_mem_io_lock(man, true);
-       if (unlikely(err != 0)) {
-               ret = VM_FAULT_NOPAGE;
-               goto out_unlock;
-       }
+       if (unlikely(err != 0))
+               return VM_FAULT_NOPAGE;
        err = ttm_mem_io_reserve_vm(bo);
        if (unlikely(err != 0)) {
                ret = VM_FAULT_SIGBUS;
@@ -219,18 +252,8 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                goto out_io_unlock;
        }
 
-       /*
-        * Make a local vma copy to modify the page_prot member
-        * and vm_flags if necessary. The vma parameter is protected
-        * by mmap_sem in write mode.
-        */
-       cvma = *vma;
-       cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
-       if (bo->mem.bus.is_iomem) {
-               cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-                                               cvma.vm_page_prot);
-       } else {
+       cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
+       if (!bo->mem.bus.is_iomem) {
                struct ttm_operation_ctx ctx = {
                        .interruptible = false,
                        .no_wait_gpu = false,
@@ -239,24 +262,21 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                };
 
                ttm = bo->ttm;
-               cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-                                               cvma.vm_page_prot);
-
-               /* Allocate all page at once, most common usage */
-               if (ttm_tt_populate(ttm, &ctx)) {
+               if (ttm_tt_populate(bo->ttm, &ctx)) {
                        ret = VM_FAULT_OOM;
                        goto out_io_unlock;
                }
+       } else {
+               /* Iomem should not be marked encrypted */
+               cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
        }
 
        /*
         * Speculatively prefault a number of pages. Only error on
         * first page.
         */
-       for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+       for (i = 0; i < num_prefault; ++i) {
                if (bo->mem.bus.is_iomem) {
-                       /* Iomem should not be marked encrypted */
-                       cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
                        pfn = ttm_bo_io_mem_pfn(bo, page_offset);
                } else {
                        page = ttm->pages[page_offset];
@@ -292,12 +312,32 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
        ret = VM_FAULT_NOPAGE;
 out_io_unlock:
        ttm_mem_io_unlock(man);
-out_unlock:
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
+
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       pgprot_t prot;
+       struct ttm_buffer_object *bo = vma->vm_private_data;
+       vm_fault_t ret;
+
+       ret = ttm_bo_vm_reserve(bo, vmf);
+       if (ret)
+               return ret;
+
+       prot = vm_get_page_prot(vma->vm_flags);
+       ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+       if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+               return ret;
+
        dma_resv_unlock(bo->base.resv);
+
        return ret;
 }
 
-static void ttm_bo_vm_open(struct vm_area_struct *vma)
+void ttm_bo_vm_open(struct vm_area_struct *vma)
 {
        struct ttm_buffer_object *bo = vma->vm_private_data;
 
@@ -305,14 +345,16 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
 
        ttm_bo_get(bo);
 }
+EXPORT_SYMBOL(ttm_bo_vm_open);
 
-static void ttm_bo_vm_close(struct vm_area_struct *vma)
+void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
        struct ttm_buffer_object *bo = vma->vm_private_data;
 
        ttm_bo_put(bo);
        vma->vm_private_data = NULL;
 }
+EXPORT_SYMBOL(ttm_bo_vm_close);
 
 static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                                 unsigned long offset,
index 54fa457b26abb9a97ef898ccc1de3631952151f2..65e399d280f7ddd9e021dc4babb73c61dd026878 100644 (file)
@@ -727,4 +727,18 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
 {
        return bo->base.dev != NULL;
 }
+
+/* Default number of pre-faulted pages in the TTM fault handler */
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+                            struct vm_fault *vmf);
+
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+                                   pgprot_t prot,
+                                   pgoff_t num_prefault);
+
+void ttm_bo_vm_open(struct vm_area_struct *vma);
+
+void ttm_bo_vm_close(struct vm_area_struct *vma);
 #endif
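
As a usage note rather than part of the patch: with ttm_bo_vm_open() and
ttm_bo_vm_close() exported, a driver can reuse them directly in its own
vm_operations_struct together with a customized fault handler such as the
hypothetical my_driver_vm_fault() sketched above:

/* Hypothetical vm_operations_struct reusing the newly exported helpers. */
static const struct vm_operations_struct my_driver_vm_ops = {
	.fault = my_driver_vm_fault,	/* custom handler built on the helpers */
	.open = ttm_bo_vm_open,		/* takes an extra reference on the bo */
	.close = ttm_bo_vm_close,	/* drops the reference, clears vm_private_data */
};

/* Installed from the driver's mmap callback: vma->vm_ops = &my_driver_vm_ops; */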