/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2022 Intel Corporation
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe

#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
#include "xe_vm.h"
25 DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
26 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
30 __field(struct xe_gt_tlb_invalidation_fence *, fence)
35 __entry->fence = fence;
36 __entry->seqno = fence->seqno;
39 TP_printk("fence=%p, seqno=%d",
40 __entry->fence, __entry->seqno)
43 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
44 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
48 DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
49 xe_gt_tlb_invalidation_fence_work_func,
50 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
54 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
55 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
59 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
60 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
64 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
65 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
69 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
70 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
74 DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
75 TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
79 DECLARE_EVENT_CLASS(xe_bo,
80 TP_PROTO(struct xe_bo *bo),
86 __field(struct xe_vm *, vm)
90 __entry->size = bo->size;
91 __entry->flags = bo->flags;
95 TP_printk("size=%zu, flags=0x%02x, vm=%p",
96 __entry->size, __entry->flags, __entry->vm)
99 DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
100 TP_PROTO(struct xe_bo *bo),
104 TRACE_EVENT(xe_bo_move,
105 TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
106 bool move_lacks_source),
107 TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
109 __field(struct xe_bo *, bo)
110 __field(size_t, size)
111 __field(u32, new_placement)
112 __field(u32, old_placement)
113 __array(char, device_id, 12)
114 __field(bool, move_lacks_source)
119 __entry->size = bo->size;
120 __entry->new_placement = new_placement;
121 __entry->old_placement = old_placement;
122 strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
123 __entry->move_lacks_source = move_lacks_source;
125 TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
126 __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
127 xe_mem_type_to_name[__entry->old_placement],
128 xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
131 DECLARE_EVENT_CLASS(xe_exec_queue,
132 TP_PROTO(struct xe_exec_queue *q),
136 __field(enum xe_engine_class, class)
137 __field(u32, logical_mask)
141 __field(u32, guc_state)
146 __entry->class = q->class;
147 __entry->logical_mask = q->logical_mask;
148 __entry->gt_id = q->gt->info.id;
149 __entry->width = q->width;
150 __entry->guc_id = q->guc->id;
151 __entry->guc_state = atomic_read(&q->guc->state);
152 __entry->flags = q->flags;
155 TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
156 __entry->class, __entry->logical_mask,
157 __entry->gt_id, __entry->width, __entry->guc_id,
158 __entry->guc_state, __entry->flags)
161 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
162 TP_PROTO(struct xe_exec_queue *q),
166 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
167 TP_PROTO(struct xe_exec_queue *q),
171 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
172 TP_PROTO(struct xe_exec_queue *q),
176 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
177 TP_PROTO(struct xe_exec_queue *q),
181 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
182 TP_PROTO(struct xe_exec_queue *q),
186 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
187 TP_PROTO(struct xe_exec_queue *q),
191 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
192 TP_PROTO(struct xe_exec_queue *q),
196 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
197 TP_PROTO(struct xe_exec_queue *q),
201 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
202 TP_PROTO(struct xe_exec_queue *q),
206 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
207 TP_PROTO(struct xe_exec_queue *q),
211 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
212 TP_PROTO(struct xe_exec_queue *q),
216 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
217 TP_PROTO(struct xe_exec_queue *q),
221 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
222 TP_PROTO(struct xe_exec_queue *q),
226 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
227 TP_PROTO(struct xe_exec_queue *q),
231 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
232 TP_PROTO(struct xe_exec_queue *q),
236 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
237 TP_PROTO(struct xe_exec_queue *q),
241 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
242 TP_PROTO(struct xe_exec_queue *q),
246 DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
247 TP_PROTO(struct xe_exec_queue *q),
251 DECLARE_EVENT_CLASS(xe_sched_job,
252 TP_PROTO(struct xe_sched_job *job),
258 __field(u32, guc_state)
262 __field(u64, batch_addr)
266 __entry->seqno = xe_sched_job_seqno(job);
267 __entry->guc_id = job->q->guc->id;
269 atomic_read(&job->q->guc->state);
270 __entry->flags = job->q->flags;
271 __entry->error = job->fence->error;
272 __entry->fence = (unsigned long)job->fence;
273 __entry->batch_addr = (u64)job->batch_addr[0];
276 TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
277 __entry->fence, __entry->seqno, __entry->guc_id,
278 __entry->batch_addr, __entry->guc_state,
279 __entry->flags, __entry->error)
282 DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
283 TP_PROTO(struct xe_sched_job *job),
287 DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
288 TP_PROTO(struct xe_sched_job *job),
292 DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
293 TP_PROTO(struct xe_sched_job *job),
297 DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
298 TP_PROTO(struct xe_sched_job *job),
302 DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
303 TP_PROTO(struct xe_sched_job *job),
307 DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
308 TP_PROTO(struct xe_sched_job *job),
312 DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
313 TP_PROTO(struct xe_sched_job *job),
317 DECLARE_EVENT_CLASS(xe_sched_msg,
318 TP_PROTO(struct xe_sched_msg *msg),
327 __entry->opcode = msg->opcode;
329 ((struct xe_exec_queue *)msg->private_data)->guc->id;
332 TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
336 DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
337 TP_PROTO(struct xe_sched_msg *msg),
341 DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
342 TP_PROTO(struct xe_sched_msg *msg),
346 DECLARE_EVENT_CLASS(xe_hw_fence,
347 TP_PROTO(struct xe_hw_fence *fence),
353 __field(struct xe_hw_fence *, fence)
357 __entry->ctx = fence->dma.context;
358 __entry->seqno = fence->dma.seqno;
359 __entry->fence = fence;
362 TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
363 __entry->ctx, __entry->fence, __entry->seqno)
366 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
367 TP_PROTO(struct xe_hw_fence *fence),
371 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
372 TP_PROTO(struct xe_hw_fence *fence),
376 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
377 TP_PROTO(struct xe_hw_fence *fence),
381 DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
382 TP_PROTO(struct xe_hw_fence *fence),
386 DECLARE_EVENT_CLASS(xe_vma,
387 TP_PROTO(struct xe_vma *vma),
391 __field(struct xe_vma *, vma)
400 __entry->asid = xe_vma_vm(vma)->usm.asid;
401 __entry->start = xe_vma_start(vma);
402 __entry->end = xe_vma_end(vma) - 1;
403 __entry->ptr = xe_vma_userptr(vma);
406 TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
407 __entry->vma, __entry->asid, __entry->start,
408 __entry->end, __entry->ptr)
411 DEFINE_EVENT(xe_vma, xe_vma_flush,
412 TP_PROTO(struct xe_vma *vma),
416 DEFINE_EVENT(xe_vma, xe_vma_pagefault,
417 TP_PROTO(struct xe_vma *vma),
421 DEFINE_EVENT(xe_vma, xe_vma_acc,
422 TP_PROTO(struct xe_vma *vma),
426 DEFINE_EVENT(xe_vma, xe_vma_fail,
427 TP_PROTO(struct xe_vma *vma),
431 DEFINE_EVENT(xe_vma, xe_vma_bind,
432 TP_PROTO(struct xe_vma *vma),
436 DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
437 TP_PROTO(struct xe_vma *vma),
441 DEFINE_EVENT(xe_vma, xe_vma_unbind,
442 TP_PROTO(struct xe_vma *vma),
446 DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
447 TP_PROTO(struct xe_vma *vma),
451 DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
452 TP_PROTO(struct xe_vma *vma),
456 DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
457 TP_PROTO(struct xe_vma *vma),
461 DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
462 TP_PROTO(struct xe_vma *vma),
466 DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
467 TP_PROTO(struct xe_vma *vma),
471 DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
472 TP_PROTO(struct xe_vma *vma),
476 DEFINE_EVENT(xe_vma, xe_vma_evict,
477 TP_PROTO(struct xe_vma *vma),
481 DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
482 TP_PROTO(struct xe_vma *vma),
486 DECLARE_EVENT_CLASS(xe_vm,
487 TP_PROTO(struct xe_vm *vm),
491 __field(struct xe_vm *, vm)
497 __entry->asid = vm->usm.asid;
500 TP_printk("vm=%p, asid=0x%05x", __entry->vm,
504 DEFINE_EVENT(xe_vm, xe_vm_kill,
505 TP_PROTO(struct xe_vm *vm),
509 DEFINE_EVENT(xe_vm, xe_vm_create,
510 TP_PROTO(struct xe_vm *vm),
514 DEFINE_EVENT(xe_vm, xe_vm_free,
515 TP_PROTO(struct xe_vm *vm),
519 DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
520 TP_PROTO(struct xe_vm *vm),
524 DEFINE_EVENT(xe_vm, xe_vm_restart,
525 TP_PROTO(struct xe_vm *vm),
529 DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
530 TP_PROTO(struct xe_vm *vm),
534 DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
535 TP_PROTO(struct xe_vm *vm),
539 DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
540 TP_PROTO(struct xe_vm *vm),
545 DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
546 TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
547 TP_ARGS(_head, _tail, size, space, len),
558 __entry->_head = _head;
559 __entry->_tail = _tail;
560 __entry->size = size;
561 __entry->space = space;
565 TP_printk("h2g flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
566 __entry->_head, __entry->_tail, __entry->size,
567 __entry->space, __entry->len)
570 DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_h2g_flow_control,
571 TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
572 TP_ARGS(_head, _tail, size, space, len)
575 DEFINE_EVENT_PRINT(xe_guc_ct_flow_control, xe_guc_ct_g2h_flow_control,
576 TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
577 TP_ARGS(_head, _tail, size, space, len),
579 TP_printk("g2h flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
580 __entry->_head, __entry->_tail, __entry->size,
581 __entry->space, __entry->len)
584 DECLARE_EVENT_CLASS(xe_guc_ctb,
585 TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
586 TP_ARGS(gt_id, action, len, _head, tail),
597 __entry->gt_id = gt_id;
598 __entry->action = action;
600 __entry->tail = tail;
601 __entry->_head = _head;
604 TP_printk("gt%d: H2G CTB: action=0x%x, len=%d, tail=%d, head=%d\n",
605 __entry->gt_id, __entry->action, __entry->len,
606 __entry->tail, __entry->_head)
609 DEFINE_EVENT(xe_guc_ctb, xe_guc_ctb_h2g,
610 TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
611 TP_ARGS(gt_id, action, len, _head, tail)
614 DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
615 TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
616 TP_ARGS(gt_id, action, len, _head, tail),
618 TP_printk("gt%d: G2H CTB: action=0x%x, len=%d, tail=%d, head=%d\n",
619 __entry->gt_id, __entry->action, __entry->len,
620 __entry->tail, __entry->_head)
626 /* This part must be outside protection */
627 #undef TRACE_INCLUDE_PATH
628 #undef TRACE_INCLUDE_FILE
629 #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
630 #define TRACE_INCLUDE_FILE xe_trace
631 #include <trace/define_trace.h>