drm/virtio: Conditionally allocate virtio_gpu_fence
authorGurchetan Singh <gurchetansingh@chromium.org>
Fri, 7 Jul 2023 21:31:24 +0000 (14:31 -0700)
committerDmitry Osipenko <dmitry.osipenko@collabora.com>
Sun, 9 Jul 2023 20:30:50 +0000 (23:30 +0300)
We don't want to create a fence for every command submission.  It's
only necessary when userspace provides a waitable token for submission.
This could be:

1) bo_handles, to be used with VIRTGPU_WAIT
2) out_fence_fd, to be used with dma_fence apis
3) a ring_idx provided with VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK
   + DRM event API
4) syncobjs in the future

The use case is just submitting a command to the host and expecting
no response.  For example, gfxstream has GFXSTREAM_CONTEXT_PING that
just wakes up the host side worker threads.  There's also
CROSS_DOMAIN_CMD_SEND which just sends data to the Wayland server.

This prevents the need to signal the automatically created
virtio_gpu_fence.

In addition, VIRTGPU_EXECBUF_RING_IDX is checked when creating a
DRM event object.  VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is
already defined in terms of per-context rings.  It was theoretically
possible to create a DRM event on the global timeline (ring_idx == 0),
if the context enabled DRM event polling.  However, that wouldn't
work, and userspace (e.g. Sommelier) doesn't do that.  Explicitly
disallow it for clarity.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> # edited coding style
Link: https://patchwork.freedesktop.org/patch/msgid/20230707213124.494-1-gurchetansingh@chromium.org
drivers/gpu/drm/virtio/virtgpu_submit.c

index cf3c04b16a7a8f7a813f1ccb91fc58d1d9767a02..1d010c66910d80ade86b54bdd534ead72f45e5c9 100644 (file)
@@ -64,13 +64,9 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
                                         struct virtio_gpu_fence *fence,
                                         u32 ring_idx)
 {
-       struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct virtio_gpu_fence_event *e = NULL;
        int ret;
 
-       if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
-               return 0;
-
        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;
@@ -164,18 +160,30 @@ static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fence *out_fence;
+       bool drm_fence_event;
        int err;
 
        memset(submit, 0, sizeof(*submit));
 
-       out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
-       if (!out_fence)
-               return -ENOMEM;
-
-       err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
-       if (err) {
-               dma_fence_put(&out_fence->f);
-               return err;
+       if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
+           (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+               drm_fence_event = true;
+       else
+               drm_fence_event = false;
+
+       if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+           exbuf->num_bo_handles ||
+           drm_fence_event)
+               out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+       else
+               out_fence = NULL;
+
+       if (drm_fence_event) {
+               err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+               if (err) {
+                       dma_fence_put(&out_fence->f);
+                       return err;
+               }
        }
 
        submit->out_fence = out_fence;