drm/virtio: Spiff out cmd queue/response traces
authorRob Clark <robdclark@chromium.org>
Wed, 30 Nov 2022 00:08:41 +0000 (16:08 -0800)
committerDmitry Osipenko <dmitry.osipenko@collabora.com>
Mon, 2 Jan 2023 14:51:27 +0000 (17:51 +0300)
Add a sequence number to make it easier to match up cmd/resp pairs, and
report the number of free slots in the virtqueue to make starvation
issues easier to spot.

v2: Fix handling of string fields as well

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221130000841.318037-1-robdclark@gmail.com
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_trace.h
drivers/gpu/drm/virtio/virtgpu_vq.c

index b7a64c7dcc2c93e17a123fb30de8d2643df8e212..af6ffb696086698bd9a9327656a8decc9bd7976e 100644
@@ -165,6 +165,8 @@ struct virtio_gpu_vbuffer {
 
        struct virtio_gpu_object_array *objs;
        struct list_head list;
+
+       uint32_t seqno;
 };
 
 struct virtio_gpu_output {
@@ -194,6 +196,7 @@ struct virtio_gpu_queue {
        spinlock_t qlock;
        wait_queue_head_t ack_queue;
        struct work_struct dequeue_work;
+       uint32_t seqno;
 };
 
 struct virtio_gpu_drv_capset {
index 711ecc2bd241ebc58f49554f47a2ca8148828021..031bc77689d5104b2d6e9882678c6984fd29b0e8 100644
@@ -9,40 +9,44 @@
 #define TRACE_INCLUDE_FILE virtgpu_trace
 
 DECLARE_EVENT_CLASS(virtio_gpu_cmd,
-       TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
-       TP_ARGS(vq, hdr),
+       TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+       TP_ARGS(vq, hdr, seqno),
        TP_STRUCT__entry(
                         __field(int, dev)
                         __field(unsigned int, vq)
-                        __field(const char *, name)
+                        __string(name, vq->name)
                         __field(u32, type)
                         __field(u32, flags)
                         __field(u64, fence_id)
                         __field(u32, ctx_id)
+                        __field(u32, num_free)
+                        __field(u32, seqno)
                         ),
        TP_fast_assign(
                       __entry->dev = vq->vdev->index;
                       __entry->vq = vq->index;
-                      __entry->name = vq->name;
+                      __assign_str(name, vq->name);
                       __entry->type = le32_to_cpu(hdr->type);
                       __entry->flags = le32_to_cpu(hdr->flags);
                       __entry->fence_id = le64_to_cpu(hdr->fence_id);
                       __entry->ctx_id = le32_to_cpu(hdr->ctx_id);
+                      __entry->num_free = vq->num_free;
+                      __entry->seqno = seqno;
                       ),
-       TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u",
-                 __entry->dev, __entry->vq, __entry->name,
+       TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u num_free=%u seqno=%u",
+                 __entry->dev, __entry->vq, __get_str(name),
                  __entry->type, __entry->flags, __entry->fence_id,
-                 __entry->ctx_id)
+                 __entry->ctx_id, __entry->num_free, __entry->seqno)
 );
 
 DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_queue,
-       TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
-       TP_ARGS(vq, hdr)
+       TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+       TP_ARGS(vq, hdr, seqno)
 );
 
 DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_response,
-       TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
-       TP_ARGS(vq, hdr)
+       TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+       TP_ARGS(vq, hdr, seqno)
 );
 
 #endif
index 9ff8660b50ade507e9d357c10ecdf61d10cfbede..a04a9b20896dcab4c9baebf509be50a9422f3ba2 100644
@@ -215,7 +215,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
        list_for_each_entry(entry, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
 
-               trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+               trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);
 
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                        if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
@@ -261,6 +261,10 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
        spin_unlock(&vgdev->cursorq.qlock);
 
        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+               struct virtio_gpu_ctrl_hdr *resp =
+                       (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+
+               trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
@@ -353,7 +357,8 @@ again:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        WARN_ON(ret);
 
-       trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+       vbuf->seqno = ++vgdev->ctrlq.seqno;
+       trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
 
        atomic_inc(&vgdev->pending_commands);
 
@@ -465,8 +470,10 @@ retry:
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
+               vbuf->seqno = ++vgdev->cursorq.seqno;
                trace_virtio_gpu_cmd_queue(vq,
-                       virtio_gpu_vbuf_ctrl_hdr(vbuf));
+                       virtio_gpu_vbuf_ctrl_hdr(vbuf),
+                       vbuf->seqno);
 
                notify = virtqueue_kick_prepare(vq);
        }
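
With the new seqno field, queue and response records can be paired up in
post-processing. The sketch below is illustrative only and not part of the
patch: it reads trace_pipe and reports each virtio_gpu_cmd_queue /
virtio_gpu_cmd_response record by its seqno, assuming tracefs is mounted at
/sys/kernel/tracing and that both events were enabled beforehand.

/*
 * Illustrative userspace helper (not part of the patch): pair up
 * virtio_gpu_cmd_queue / virtio_gpu_cmd_response trace records by the
 * new seqno field.  Assumes tracefs is mounted at /sys/kernel/tracing
 * and that both events have already been enabled via their tracefs
 * "enable" files.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *tp = fopen("/sys/kernel/tracing/trace_pipe", "r");
	char line[512];

	if (!tp) {
		perror("trace_pipe");
		return 1;
	}

	while (fgets(line, sizeof(line), tp)) {
		unsigned int seqno;
		const char *s = strstr(line, "seqno=");

		/* Skip records without a seqno field (other events, old kernels). */
		if (!s || sscanf(s, "seqno=%u", &seqno) != 1)
			continue;

		if (strstr(line, "virtio_gpu_cmd_queue"))
			printf("queued    seqno=%u\n", seqno);
		else if (strstr(line, "virtio_gpu_cmd_response"))
			printf("completed seqno=%u\n", seqno);
	}

	fclose(tp);
	return 0;
}

Note that ctrlq and cursorq keep independent seqno counters, so when both
queues are traced the vq/name fields also need to be taken into account when
pairing records.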