// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/*
 * Cmdstream submission:
 */
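/*
 * A submit flows through several stages: copy in the bo and cmd tables
 * from userspace, resolve GEM handles to objects, lock and pin the
 * objects, patch relocs if any presumed iova turned out to be stale,
 * then arm and push the scheduler job.  msm_ioctl_gem_submit() below
 * drives the whole sequence.
 */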
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds)
{
	struct msm_gem_submit *submit;
	uint64_t sz;
	int ret;

	sz = struct_size(submit, bos, nr_bos) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return ERR_PTR(-ENOMEM);

	submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return ERR_PTR(-ENOMEM);

	ret = drm_sched_job_init(&submit->base, queue->entity, queue);
	if (ret) {
		kfree(submit);
		return ERR_PTR(ret);
	}

	kref_init(&submit->ref);
	submit->dev = dev;
	submit->aspace = queue->ctx->aspace;
	submit->gpu = gpu;
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->ring = gpu->rb[queue->ring_nr];
	submit->fault_dumped = false;

	INIT_LIST_HEAD(&submit->node);

	return submit;
}
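/*
 * Called when the last kref is dropped: release the fence-idr slot, the
 * fences, the pid and queue references, and the reloc tables that were
 * copied in from userspace.
 */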
void __msm_gem_submit_destroy(struct kref *kref)
{
	struct msm_gem_submit *submit =
			container_of(kref, struct msm_gem_submit, ref);
	unsigned i;

	if (submit->fence_id) {
		mutex_lock(&submit->queue->lock);
		idr_remove(&submit->queue->fence_idr, submit->fence_id);
		mutex_unlock(&submit->queue->lock);
	}

	dma_fence_put(submit->user_fence);
	dma_fence_put(submit->hw_fence);

	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	for (i = 0; i < submit->nr_cmds; i++)
		kfree(submit->cmd[i].relocs);

	kfree(submit);
}
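/*
 * Copy in the bo table and resolve handles to GEM objects.  Done in two
 * passes: the first copies from userspace and validates flags (and can
 * fault), the second does the handle lookups in bulk under table_lock,
 * taking a reference on each object.
 */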
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			i = 0;
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MANDATORY_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
		/* in submit_pin_objects() we figure out if this is true: */
		submit->bos[i].iova = submit_bo.presumed;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = to_msm_bo(obj);
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}
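/*
 * Copy in the cmd table.  Sizes and offsets arrive in bytes from
 * userspace but are stored in dwords, and each cmd's reloc table is
 * copied into a kernel allocation up front, since copy_from_user() is
 * not allowed once the ww ticket is held (it upsets lockdep).
 */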
static int submit_lookup_cmds(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	size_t sz;
	int ret = 0;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			return -EINVAL;
		}

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].offset = submit_cmd.submit_offset / 4;
		submit->cmd[i].idx = submit_cmd.submit_idx;
		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;

		userptr = u64_to_user_ptr(submit_cmd.relocs);

		sz = array_size(submit_cmd.nr_relocs,
				sizeof(struct drm_msm_gem_submit_reloc));
		/* check for overflow: */
		if (sz == SIZE_MAX) {
			ret = -ENOMEM;
			goto out;
		}
		submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
		if (!submit->cmd[i].relocs) {
			ret = -ENOMEM;
			goto out;
		}
		ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	return ret;
}
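/*
 * Per-bo state flags (BO_LOCKED, BO_ACTIVE, BO_PINNED, BO_VALID, defined
 * in msm_gem.h) track how far each bo has progressed through the submit,
 * so the error paths can unwind exactly what has been done so far.
 */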
/* Unwind bo state, according to cleanup_flags.  In the success case, only
 * the lock is dropped at the end of the submit (and active/pin ref is dropped
 * later when the submit is retired).
 */
static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
		unsigned cleanup_flags)
{
	struct drm_gem_object *obj = &submit->bos[i].obj->base;
	unsigned flags = submit->bos[i].flags & cleanup_flags;

	/*
	 * Clear flags bit before dropping lock, so that the msm_job_run()
	 * path isn't racing with submit_cleanup() (ie. the read/modify/
	 * write is protected by the obj lock in all paths)
	 */
	submit->bos[i].flags &= ~cleanup_flags;

	if (flags & BO_PINNED)
		msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);

	if (flags & BO_ACTIVE)
		msm_gem_active_put(obj);

	if (flags & BO_LOCKED)
		dma_resv_unlock(obj->resv);
}
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;
}
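/*
 * Locking uses the ww-mutex protocol: on -EDEADLK we back off (dropping
 * every lock already held), take the contended lock in the slow path,
 * and retry the whole loop with the same acquire ticket so that lock
 * ordering stays fair across concurrent submits.
 */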
/* This is where we make sure all the bo's are reserved (locked): */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(msm_obj->base.resv,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	if (ret == -EALREADY) {
		DRM_ERROR("handle %u at index %u already on submit list\n",
				submit->bos[i].handle, i);
		ret = -EINVAL;
	}

	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}

		/* Not expecting -EALREADY here, if the bo was already
		 * locked, we should have gotten -EALREADY already from
		 * the dma_resv_lock_interruptible() call.
		 */
		WARN_ON_ONCE(ret == -EALREADY);
	}

	return ret;
}
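/*
 * With every bo locked, reserve a fence slot and add the bo's existing
 * fences as scheduler dependencies.  Write bos always add dependencies
 * (exclusive fences must be ordered); read-only dependencies are skipped
 * when userspace passes MSM_SUBMIT_NO_IMPLICIT.
 */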
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		/* NOTE: dma_resv_reserve_fences() must happen before
		 * dma_resv_add_fence(), which makes this a slightly
		 * strange place to call it.  OTOH this is a
		 * convenient can-fail point to hook it in.
		 */
		ret = dma_resv_reserve_fences(obj->resv, 1);
		if (ret)
			return ret;

		/* exclusive fences must be ordered */
		if (no_implicit && !write)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->base,
				obj, write);
		if (ret)
			break;
	}

	return ret;
}
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	/*
	 * Increment active_count first, so if under memory pressure, we
	 * don't inadvertently evict a bo needed by the submit in order
	 * to pin an earlier bo in the same submit.
	 */
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		msm_gem_active_get(obj, submit->gpu);
		submit->bos[i].flags |= BO_ACTIVE;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		struct msm_gem_vma *vma;

		/* if locking succeeded, pin bo: */
		vma = msm_gem_get_vma_locked(obj, submit->aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			break;
		}

		ret = msm_gem_pin_vma_locked(obj, vma);
		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].vma = vma;

		if (vma->iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = vma->iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}
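/*
 * After the job's fence exists, publish it on each bo's reservation
 * object with the appropriate usage, so other drivers and later submits
 * can implicitly sync against this one.
 */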
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					DMA_RESV_USAGE_WRITE);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					DMA_RESV_USAGE_READ);
	}
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}
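/*
 * A reloc rewrites one dword of the cmdstream: the target bo's iova is
 * offset by reloc_offset, shifted left (or right, for a negative shift)
 * by the shift field, OR'd with the literal bits in the "or" field, and
 * written at submit_offset.  Reloc offsets must not decrease, which is
 * what the last_offset check below enforces.
 */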
/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr_locked(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr_locked(&obj->base);

	return ret;
}
/* Cleanup submit at end of ioctl.  In the error case, this also drops
 * references, unpins, and drops active refcnt.  In the non-error case,
 * this is done when the submit is retired.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
	unsigned cleanup_flags = BO_LOCKED;
	unsigned i;

	if (error)
		cleanup_flags |= BO_PINNED | BO_ACTIVE;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_cleanup_bo(submit, i, cleanup_flags);
		if (error)
			drm_gem_object_put(&msm_obj->base);
	}
}
void msm_submit_retire(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		msm_gem_lock(obj);
		submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE);
		msm_gem_unlock(obj);
		drm_gem_object_put(obj);
	}
}
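/*
 * Syncobj handling: "in" syncobjs become scheduler dependencies (and are
 * optionally reset after the job is queued), while "out" syncobjs are
 * recorded here so the job's finished fence can be attached to them once
 * it exists; timeline points use a dma_fence_chain.
 */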
struct msm_submit_post_dep {
	struct drm_syncobj *syncobj;
	uint64_t point;
	struct dma_fence_chain *chain;
};
static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
		struct drm_file *file,
		uint64_t in_syncobjs_addr,
		uint32_t nr_in_syncobjs,
		size_t syncobj_stride,
		struct msm_ringbuffer *ring)
{
	struct drm_syncobj **syncobjs = NULL;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
			GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!syncobjs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_in_syncobjs; ++i) {
		uint64_t address = in_syncobjs_addr + i * syncobj_stride;
		struct dma_fence *fence;

		if (copy_from_user(&syncobj_desc,
				u64_to_user_ptr(address),
				min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		if (syncobj_desc.point &&
		    !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
			ret = -EINVAL;
			break;
		}

		ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
				syncobj_desc.point, 0, &fence);
		if (ret)
			break;

		ret = drm_sched_job_add_dependency(&submit->base, fence);
		if (ret)
			break;

		if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
			syncobjs[i] =
				drm_syncobj_find(file, syncobj_desc.handle);
			if (!syncobjs[i]) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			if (syncobjs[j])
				drm_syncobj_put(syncobjs[j]);
		}
		kfree(syncobjs);
		return ERR_PTR(ret);
	}

	return syncobjs;
}
static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
		uint32_t nr_syncobjs)
{
	uint32_t i;

	for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
		if (syncobjs[i])
			drm_syncobj_replace_fence(syncobjs[i], NULL);
	}
}
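/*
 * Validate and look up the "out" syncobjs.  Nothing is attached yet;
 * chains for timeline points are pre-allocated here so that the attach
 * step, which runs after the job is armed, cannot fail.
 */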
static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
		struct drm_file *file,
		uint64_t syncobjs_addr,
		uint32_t nr_syncobjs,
		size_t syncobj_stride)
{
	struct msm_submit_post_dep *post_deps;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	/* zero-initialized, so the unwind loop below sees NULL pointers
	 * for entries that were never fully initialized:
	 */
	post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
			GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!post_deps)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_syncobjs; ++i) {
		uint64_t address = syncobjs_addr + i * syncobj_stride;

		if (copy_from_user(&syncobj_desc,
				u64_to_user_ptr(address),
				min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		post_deps[i].point = syncobj_desc.point;
		post_deps[i].chain = NULL;

		if (syncobj_desc.flags) {
			ret = -EINVAL;
			break;
		}

		if (syncobj_desc.point) {
			if (!drm_core_check_feature(dev,
					DRIVER_SYNCOBJ_TIMELINE)) {
				ret = -EOPNOTSUPP;
				break;
			}

			post_deps[i].chain = dma_fence_chain_alloc();
			if (!post_deps[i].chain) {
				ret = -ENOMEM;
				break;
			}
		}

		post_deps[i].syncobj =
			drm_syncobj_find(file, syncobj_desc.handle);
		if (!post_deps[i].syncobj) {
			ret = -EINVAL;
			break;
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			dma_fence_chain_free(post_deps[j].chain);
			if (post_deps[j].syncobj)
				drm_syncobj_put(post_deps[j].syncobj);
		}

		kfree(post_deps);
		return ERR_PTR(ret);
	}

	return post_deps;
}
static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
		uint32_t count, struct dma_fence *fence)
{
	uint32_t i;

	for (i = 0; post_deps && i < count; ++i) {
		if (post_deps[i].chain) {
			drm_syncobj_add_point(post_deps[i].syncobj,
					post_deps[i].chain,
					fence, post_deps[i].point);
			post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(post_deps[i].syncobj,
					fence);
		}
	}
}
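/*
 * The submit ioctl itself.  All copy-in and validation that can fail
 * happens before the scheduler job is armed; once armed, the job is
 * always pushed, and any late failure (fence-id allocation, sync_file
 * creation) is only reported back to userspace.
 */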
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit = NULL;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	struct msm_submit_post_dep *post_deps = NULL;
	struct drm_syncobj **syncobjs_to_reset = NULL;
	int out_fence_fd = -1;
	struct pid *pid = get_pid(task_pid(current));
	bool has_ww_ticket = false;
	unsigned i;
	int ret, submitid;

	if (!gpu)
		return -ENXIO;

	if (args->pad)
		return -EINVAL;
	if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
		DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
		return -EPERM;
	}

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	/* Get a unique identifier for the submission for logging purposes */
	submitid = atomic_inc_return(&ident) - 1;

	ring = gpu->rb[queue->ring_nr];
	trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
		args->nr_bos, args->nr_cmds);

	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		goto out_post_unlock;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_unlock;
		}
	}
	submit = submit_create(dev, gpu, queue, args->nr_bos,
		args->nr_cmds);
	if (IS_ERR(submit)) {
		ret = PTR_ERR(submit);
		submit = NULL;
		goto out_unlock;
	}

	submit->pid = pid;
	submit->ident = submitid;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence) {
			ret = -EINVAL;
			goto out_unlock;
		}

		ret = drm_sched_job_add_dependency(&submit->base, in_fence);
		if (ret)
			goto out_unlock;
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
		syncobjs_to_reset = msm_parse_deps(submit, file,
				args->in_syncobjs,
				args->nr_in_syncobjs,
				args->syncobj_stride, ring);
		if (IS_ERR(syncobjs_to_reset)) {
			ret = PTR_ERR(syncobjs_to_reset);
			goto out_unlock;
		}
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
		post_deps = msm_parse_post_deps(dev, file,
				args->out_syncobjs,
				args->nr_out_syncobjs,
				args->syncobj_stride);
		if (IS_ERR(post_deps)) {
			ret = PTR_ERR(post_deps);
			goto out_unlock;
		}
	}
	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lookup_cmds(submit, args, file);
	if (ret)
		goto out;

	/* copy_*_user while holding a ww ticket upsets lockdep */
	ww_acquire_init(&submit->ticket, &reservation_ww_class);
	has_ww_ticket = true;
	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;
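	/*
	 * Validate each cmd against its backing bo: the size/offset pair
	 * (in dwords) must lie within the bo, the final GPU address is
	 * computed from the pinned iova, and relocs are only processed if
	 * some presumed iova turned out to be stale.
	 */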
	for (i = 0; i < args->nr_cmds; i++) {
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = submit_bo(submit, submit->cmd[i].idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (!submit->cmd[i].size ||
			((submit->cmd[i].size + submit->cmd[i].offset) >
				msm_obj->base.size / 4)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4,
				submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;
	/*
	 * If using a userspace-provided seqno fence, validate that the id
	 * is available before arming the sched job.  Since access to
	 * fence_idr is serialized on the queue lock, the slot should still
	 * be available after the job is armed.
	 */
	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
			idr_find(&queue->fence_idr, args->fence)) {
		ret = -EINVAL;
		goto out;
	}

	drm_sched_job_arm(&submit->base);

	submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);

	if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
		/*
		 * Userspace has assigned the seqno fence that it wants
		 * us to use.  It is an error to pick a fence sequence
		 * number that is not available.
		 */
		submit->fence_id = args->fence;
		ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
				&submit->fence_id, submit->fence_id,
				GFP_KERNEL);
		/*
		 * We've already validated that the fence_id slot is valid,
		 * so if idr_alloc_u32 failed, it is a kernel bug
		 */
		WARN_ON(ret);
	} else {
		/*
		 * Allocate an id which can be used by WAIT_FENCE ioctl to map
		 * back to the underlying fence.
		 */
		submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
				submit->user_fence, 1,
				INT_MAX, GFP_KERNEL);
	}

	if (submit->fence_id < 0) {
		ret = submit->fence_id;
		submit->fence_id = 0;
	}
	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		struct sync_file *sync_file = sync_file_create(submit->user_fence);
		if (!sync_file) {
			ret = -ENOMEM;
		} else {
			fd_install(out_fence_fd, sync_file->file);
			args->fence_fd = out_fence_fd;
		}
	}

	submit_attach_object_fences(submit);

	/* The scheduler owns a ref now: */
	msm_gem_submit_get(submit);

	drm_sched_entity_push_job(&submit->base);

	args->fence = submit->fence_id;
	queue->last_fence = submit->fence_id;

	msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
	msm_process_post_deps(post_deps, args->nr_out_syncobjs,
			submit->user_fence);

out:
	submit_cleanup(submit, !!ret);
	if (has_ww_ticket)
		ww_acquire_fini(&submit->ticket);
out_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	mutex_unlock(&queue->lock);
	if (submit)
		msm_gem_submit_put(submit);
out_post_unlock:
	if (!IS_ERR_OR_NULL(post_deps)) {
		for (i = 0; i < args->nr_out_syncobjs; ++i) {
			kfree(post_deps[i].chain);
			drm_syncobj_put(post_deps[i].syncobj);
		}
		kfree(post_deps);
	}

	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
		for (i = 0; i < args->nr_in_syncobjs; ++i) {
			if (syncobjs_to_reset[i])
				drm_syncobj_put(syncobjs_to_reset[i]);
		}
		kfree(syncobjs_to_reset);
	}

	return ret;
}