	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);

-	r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, adev);
+	r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
	if (r)
		DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");

	schedule_delayed_work(&uq_mgr->resume_work, 0);
}
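For context, the changed call site runs in the per-file open path, so the new file_priv argument lets each queue manager remember which DRM file it serves. A minimal sketch of how that stored pointer can be put to work, assuming amdgpu's usual convention that drm_file::driver_priv points at the amdgpu_fpriv which embeds this manager; the helper name is hypothetical and not part of the patch:

/* Hypothetical helper, not in the patch: map the stored drm_file back to
 * the per-file amdgpu_fpriv. Assumes driver_priv was set at open time. */
static inline struct amdgpu_fpriv *
userq_mgr_to_fpriv(struct amdgpu_userq_mgr *uq_mgr)
{
	return uq_mgr->file->driver_priv;
}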
-int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_device *adev)
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+			  struct amdgpu_device *adev)
{
	mutex_init(&userq_mgr->userq_mutex);
	idr_init_base(&userq_mgr->userq_idr, 1);
	userq_mgr->adev = adev;
+	userq_mgr->file = file_priv;

	mutex_lock(&adev->userq_mutex);
	list_add(&userq_mgr->list, &adev->userq_mgr_list);
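The manager also owns resume_work, which is what schedule_delayed_work() queues in the first hunk, and the usual way such a worker finds its state is container_of on the embedded delayed_work. A hedged sketch assuming only the fields visible in these hunks; the worker name is illustrative, not the driver's:

/* Illustrative worker, not from the patch: recover the manager from the
 * delayed_work embedded in it, then reach the owning drm_file. */
static void example_resume_worker(struct work_struct *work)
{
	struct amdgpu_userq_mgr *uq_mgr =
		container_of(to_delayed_work(work), struct amdgpu_userq_mgr,
			     resume_work);
	struct drm_file *file = uq_mgr->file;	/* new with this patch */

	/* ... revalidate and restore this file's user queues ... */
}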
	struct amdgpu_device		*adev;
	struct delayed_work		resume_work;
	struct list_head		list;
+	struct drm_file			*file;
};

struct amdgpu_db_info {

int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
-int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_device *adev);
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+			  struct amdgpu_device *adev);
void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);
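amdgpu_userq_mgr_fini() is declared above but its body is not part of this excerpt. A minimal sketch of the symmetric teardown, assuming fini simply mirrors init; the real function must also tear down any queues still registered in the IDR, which is elided here, and the name is changed to mark it as an example:

/* Example only: undo what amdgpu_userq_mgr_init() set up. */
void example_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
	struct amdgpu_device *adev = userq_mgr->adev;

	mutex_lock(&adev->userq_mutex);
	list_del(&userq_mgr->list);
	mutex_unlock(&adev->userq_mutex);

	/* Queues still present in userq_idr would be destroyed here. */
	idr_destroy(&userq_mgr->userq_idr);
	mutex_destroy(&userq_mgr->userq_mutex);
}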