 	mutex_unlock(&userq_mgr->userq_mutex);
 	mutex_destroy(&userq_mgr->userq_mutex);
 }
+
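+/**
+ * amdgpu_userq_suspend - unmap all user mode queues
+ * @adev: amdgpu device pointer
+ *
+ * Cancel any pending delayed resume work, then unmap every user mode
+ * queue on every queue manager registered with the device. Returns zero
+ * on success or a nonzero value if any unmap fails.
+ */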
+int amdgpu_userq_suspend(struct amdgpu_device *adev)
+{
+	const struct amdgpu_userq_funcs *userq_funcs;
+	struct amdgpu_usermode_queue *queue;
+	struct amdgpu_userq_mgr *uqm, *tmp;
+	int queue_id;
+	int ret = 0;
+
+	mutex_lock(&adev->userq_mutex);
+	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+		cancel_delayed_work_sync(&uqm->resume_work);
+		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+			userq_funcs = adev->userq_funcs[queue->queue_type];
+			ret |= userq_funcs->unmap(uqm, queue);
+		}
+	}
+	mutex_unlock(&adev->userq_mutex);
+	return ret;
+}
+
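+/**
+ * amdgpu_userq_resume - map all user mode queues back onto the hardware
+ * @adev: amdgpu device pointer
+ *
+ * Map every user mode queue on every queue manager. Returns zero on
+ * success or a nonzero value if any map fails.
+ */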
+int amdgpu_userq_resume(struct amdgpu_device *adev)
+{
+	const struct amdgpu_userq_funcs *userq_funcs;
+	struct amdgpu_usermode_queue *queue;
+	struct amdgpu_userq_mgr *uqm, *tmp;
+	int queue_id;
+	int ret = 0;
+
+	mutex_lock(&adev->userq_mutex);
+	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+			userq_funcs = adev->userq_funcs[queue->queue_type];
+			ret |= userq_funcs->map(uqm, queue);
+		}
+	}
+	mutex_unlock(&adev->userq_mutex);
+	return ret;
+}
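
For context (not part of this hunk): these helpers would presumably be invoked from the device-level suspend/resume path. A minimal sketch of such wiring, with hypothetical call sites and names that are illustrative only:

/* Hypothetical wiring, for illustration only -- not part of this patch. */
static int example_device_suspend(struct amdgpu_device *adev)
{
	int r;

	/* Unmap all user queues before tearing down the rest of the device. */
	r = amdgpu_userq_suspend(adev);
	if (r)
		return r;

	/* ... remainder of the suspend sequence ... */
	return 0;
}

static int example_device_resume(struct amdgpu_device *adev)
{
	/* Remap the user queues once the hardware is back up. */
	return amdgpu_userq_resume(adev);
}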