drm/virtio: Import prime buffers from other devices as guest blobs
authorVivek Kasireddy <vivek.kasireddy@intel.com>
Tue, 26 Nov 2024 03:13:45 +0000 (19:13 -0800)
committerDmitry Osipenko <dmitry.osipenko@collabora.com>
Tue, 26 Nov 2024 10:27:15 +0000 (13:27 +0300)
By importing scanout buffers from other devices, we should be able
to use the virtio-gpu driver in KMS-only mode. Note that we attach
dynamically and register a move_notify() callback so that we can
let the VMM know of any location changes associated with the backing
store of the imported object by sending detach_backing cmd.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Gurchetan Singh <gurchetansingh@chromium.org>
Cc: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
[dmitry.osipenko@collabora.com: added kref check to move_notify]
Link: https://patchwork.freedesktop.org/patch/msgid/20241126031643.3490496-5-vivek.kasireddy@intel.com
drivers/gpu/drm/virtio/virtgpu_prime.c

index 887b635dec6fcf7babb67ca751b948ffd37aed4a..688810d1b6112450f0f0bc69d1bac6a344a67c59 100644 (file)
@@ -189,13 +189,18 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_device *vgdev = obj->dev->dev_private;
        struct dma_buf_attachment *attach = obj->import_attach;
+       struct dma_resv *resv;
 
        if (attach) {
+               resv = attach->dmabuf->resv;
+               dma_resv_lock(resv, NULL);
                virtio_gpu_detach_object_fenced(bo);
 
                if (bo->sgt)
-                       dma_buf_unmap_attachment_unlocked(attach, bo->sgt,
-                                                         DMA_BIDIRECTIONAL);
+                       dma_buf_unmap_attachment(attach, bo->sgt,
+                                                DMA_BIDIRECTIONAL);
+
+               dma_resv_unlock(resv);
 
                dma_buf_detach(attach->dmabuf, attach);
                dma_buf_put(attach->dmabuf);
@@ -259,10 +264,39 @@ err_pin:
        return ret;
 }
 
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+       .free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+       struct drm_gem_object *obj = attach->importer_priv;
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+       if (bo->created && kref_read(&obj->refcount)) {
+               virtio_gpu_detach_object_fenced(bo);
+
+               if (bo->sgt)
+                       dma_buf_unmap_attachment(attach, bo->sgt,
+                                                DMA_BIDIRECTIONAL);
+
+               bo->sgt = NULL;
+       }
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+       .allow_peer2peer = true,
+       .move_notify = virtgpu_dma_buf_move_notify
+};
+
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
                                                struct dma_buf *buf)
 {
+       struct virtio_gpu_device *vgdev = dev->dev_private;
+       struct dma_buf_attachment *attach;
+       struct virtio_gpu_object *bo;
        struct drm_gem_object *obj;
+       int ret;
 
        if (buf->ops == &virtgpu_dmabuf_ops.ops) {
                obj = buf->priv;
@@ -276,7 +310,32 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
                }
        }
 
-       return drm_gem_prime_import(dev, buf);
+       if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+               return drm_gem_prime_import(dev, buf);
+
+       bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+       if (!bo)
+               return ERR_PTR(-ENOMEM);
+
+       obj = &bo->base.base;
+       obj->funcs = &virtgpu_gem_dma_buf_funcs;
+       drm_gem_private_object_init(dev, obj, buf->size);
+
+       attach = dma_buf_dynamic_attach(buf, dev->dev,
+                                       &virtgpu_dma_buf_attach_ops, obj);
+       if (IS_ERR(attach)) {
+               kfree(bo);
+               return ERR_CAST(attach);
+       }
+
+       obj->import_attach = attach;
+       get_dma_buf(buf);
+
+       ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       return obj;
 }
 
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(