drm/amdgpu: IOCTL interface for PRT support v4
authorJunwei Zhang <Jerry.Zhang@amd.com>
Mon, 16 Jan 2017 05:59:01 +0000 (13:59 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Thu, 30 Mar 2017 03:52:56 +0000 (23:52 -0400)
Till GFX8 we can only enable PRT support globally, but with the next hardware
generation we can do this on a per page basis.

Keep the interface consistent by adding PRT mappings and enable
support globally on current hardware when the first mapping is made.

v2: disable PRT support delayed and on all error paths
v3: PRT and other permissions are mutually exclusive,
    PRT mappings don't need a BO.
v4: update PRT mappings during CS as well, make va_flags 64bit

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
include/uapi/drm/amdgpu_drm.h

index 618f12884eedc2089810f5fb1914bebcffe415b4..b9212537b17d57c71f12e8120a785e5a01f13c45 100644 (file)
@@ -701,6 +701,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
 struct amdgpu_fpriv {
        struct amdgpu_vm        vm;
+       struct amdgpu_bo_va     *prt_va;
        struct mutex            bo_list_lock;
        struct idr              bo_list_handles;
        struct amdgpu_ctx_mgr   ctx_mgr;
index 99424cb8020bdf914b5627bffce01155ba8f6b73..89dcb07ab2139facba5e1dc1b88575534a4fb315 100644 (file)
@@ -759,10 +759,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
        amdgpu_bo_unref(&parser->uf_entry.robj);
 }
 
-static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
-                                  struct amdgpu_vm *vm)
+static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 {
        struct amdgpu_device *adev = p->adev;
+       struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+       struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo *bo;
        int i, r;
@@ -779,6 +780,15 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
        if (r)
                return r;
 
+       r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+       if (r)
+               return r;
+
+       r = amdgpu_sync_fence(adev, &p->job->sync,
+                             fpriv->prt_va->last_pt_update);
+       if (r)
+               return r;
+
        if (amdgpu_sriov_vf(adev)) {
                struct dma_fence *f;
                bo_va = vm->csa_bo_va;
@@ -855,7 +865,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
        if (p->job->vm) {
                p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 
-               r = amdgpu_bo_vm_update_pte(p, vm);
+               r = amdgpu_bo_vm_update_pte(p);
                if (r)
                        return r;
        }
index 106cf83c2e6b46aa711b7e82381e22b8dd449aa7..3c22656aa1bf3dbda87c62c1502f104fdb9c6556 100644 (file)
@@ -553,6 +553,12 @@ error:
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
+       const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
+               AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+               AMDGPU_VM_PAGE_EXECUTABLE;
+       const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
+               AMDGPU_VM_PAGE_PRT;
+
        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = dev->dev_private;
@@ -563,7 +569,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head list;
-       uint32_t invalid_flags, va_flags = 0;
+       uint64_t va_flags = 0;
        int r = 0;
 
        if (!adev->vm_manager.enabled)
@@ -577,11 +583,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
-                       AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
-       if ((args->flags & invalid_flags)) {
-               dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
-                       args->flags, invalid_flags);
+       if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
+               dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+                       args->flags);
                return -EINVAL;
        }
 
@@ -595,28 +599,34 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       gobj = drm_gem_object_lookup(filp, args->handle);
-       if (gobj == NULL)
-               return -ENOENT;
-       abo = gem_to_amdgpu_bo(gobj);
        INIT_LIST_HEAD(&list);
-       tv.bo = &abo->tbo;
-       tv.shared = false;
-       list_add(&tv.head, &list);
+       if (!(args->flags & AMDGPU_VM_PAGE_PRT)) {
+               gobj = drm_gem_object_lookup(filp, args->handle);
+               if (gobj == NULL)
+                       return -ENOENT;
+               abo = gem_to_amdgpu_bo(gobj);
+               tv.bo = &abo->tbo;
+               tv.shared = false;
+               list_add(&tv.head, &list);
+       } else {
+               gobj = NULL;
+               abo = NULL;
+       }
 
        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-       if (r) {
-               drm_gem_object_unreference_unlocked(gobj);
-               return r;
-       }
+       if (r)
+               goto error_unref;
 
-       bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
-       if (!bo_va) {
-               ttm_eu_backoff_reservation(&ticket, &list);
-               drm_gem_object_unreference_unlocked(gobj);
-               return -ENOENT;
+       if (abo) {
+               bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
+               if (!bo_va) {
+                       r = -ENOENT;
+                       goto error_backoff;
+               }
+       } else {
+               bo_va = fpriv->prt_va;
        }
 
        switch (args->operation) {
@@ -627,6 +637,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        va_flags |= AMDGPU_PTE_WRITEABLE;
                if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
                        va_flags |= AMDGPU_PTE_EXECUTABLE;
+               if (args->flags & AMDGPU_VM_PAGE_PRT)
+                       va_flags |= AMDGPU_PTE_PRT;
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
@@ -637,11 +649,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        default:
                break;
        }
-       if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
-           !amdgpu_vm_debug)
+       if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
                amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
+
+error_backoff:
        ttm_eu_backoff_reservation(&ticket, &list);
 
+error_unref:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
 }
index 61d94c7456723931cdeeabf044958d3f4096751c..49f93ee019e310617fd84d50fa8aaf73e0ca51e6 100644 (file)
@@ -655,6 +655,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                goto out_suspend;
        }
 
+       fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
+       if (!fpriv->prt_va) {
+               r = -ENOMEM;
+               amdgpu_vm_fini(adev, &fpriv->vm);
+               kfree(fpriv);
+               goto out_suspend;
+       }
+
        if (amdgpu_sriov_vf(adev)) {
                r = amdgpu_map_static_csa(adev, &fpriv->vm);
                if (r)
@@ -699,6 +707,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
        amdgpu_uvd_free_handles(adev, file_priv);
        amdgpu_vce_free_handles(adev, file_priv);
 
+       amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
+
        if (amdgpu_sriov_vf(adev)) {
                /* TODO: how to handle reserve failure */
                BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
index 5797283c2d79b918eaef665f053d76cdd3e0d36d..1c0ddf71193e562a89e647e7da101b8290c362f1 100644 (file)
@@ -361,6 +361,8 @@ struct drm_amdgpu_gem_op {
 #define AMDGPU_VM_PAGE_WRITEABLE       (1 << 2)
 /* executable mapping, new for VI */
 #define AMDGPU_VM_PAGE_EXECUTABLE      (1 << 3)
+/* partially resident texture */
+#define AMDGPU_VM_PAGE_PRT             (1 << 4)
 
 struct drm_amdgpu_gem_va {
        /** GEM object handle */