/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2022 Intel Corporation
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe

#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
#include "xe_vm.h"

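/*
 * Tracepoints for the Xe driver.
 *
 * Each DECLARE_EVENT_CLASS() below defines a template (the entry fields,
 * how they are filled in, and the printk format); the DEFINE_EVENT() lines
 * that follow instantiate named tracepoints sharing that template.  Call
 * sites in the driver emit them through the generated trace_<name>()
 * helpers, e.g. trace_xe_bo_move(bo).
 */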
DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
                    TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
                    TP_ARGS(fence),

                    TP_STRUCT__entry(
                             __field(u64, fence)
                             __field(int, seqno)
                             ),

                    TP_fast_assign(
                           __entry->fence = (u64)fence;
                           __entry->seqno = fence->seqno;
                           ),

                    TP_printk("fence=0x%016llx, seqno=%d",
                              __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
             TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
             TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
             xe_gt_tlb_invalidation_fence_work_func,
             TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
             TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
             TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
             TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
             TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
             TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
             TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
             TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
             TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
             TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
             TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
             TP_ARGS(fence)
);

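/* Buffer objects */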
DECLARE_EVENT_CLASS(xe_bo,
                    TP_PROTO(struct xe_bo *bo),
                    TP_ARGS(bo),

                    TP_STRUCT__entry(
                             __field(size_t, size)
                             __field(u32, flags)
                             __field(u64, vm)
                             ),

                    TP_fast_assign(
                           __entry->size = bo->size;
                           __entry->flags = bo->flags;
                           __entry->vm = (unsigned long)bo->vm;
                           ),

                    TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
                              __entry->size, __entry->flags, __entry->vm)
);

DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
             TP_PROTO(struct xe_bo *bo),
             TP_ARGS(bo)
);

DEFINE_EVENT(xe_bo, xe_bo_move,
             TP_PROTO(struct xe_bo *bo),
             TP_ARGS(bo)
);

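/* Exec queues */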
DECLARE_EVENT_CLASS(xe_exec_queue,
                    TP_PROTO(struct xe_exec_queue *q),
                    TP_ARGS(q),

                    TP_STRUCT__entry(
                             __field(enum xe_engine_class, class)
                             __field(u32, logical_mask)
                             __field(u8, gt_id)
                             __field(u16, width)
                             __field(u16, guc_id)
                             __field(u32, guc_state)
                             __field(u32, flags)
                             ),

                    TP_fast_assign(
                           __entry->class = q->class;
                           __entry->logical_mask = q->logical_mask;
                           __entry->gt_id = q->gt->info.id;
                           __entry->width = q->width;
                           __entry->guc_id = q->guc->id;
                           __entry->guc_state = atomic_read(&q->guc->state);
                           __entry->flags = q->flags;
                           ),

                    TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
                              __entry->class, __entry->logical_mask,
                              __entry->gt_id, __entry->width, __entry->guc_id,
                              __entry->guc_state, __entry->flags)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
             TP_PROTO(struct xe_exec_queue *q),
             TP_ARGS(q)
);

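/* Scheduler jobs */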
DECLARE_EVENT_CLASS(xe_sched_job,
                    TP_PROTO(struct xe_sched_job *job),
                    TP_ARGS(job),

                    TP_STRUCT__entry(
                             __field(u32, seqno)
                             __field(u16, guc_id)
                             __field(u32, guc_state)
                             __field(u32, flags)
                             __field(int, error)
                             __field(u64, fence)
                             __field(u64, batch_addr)
                             ),

                    TP_fast_assign(
                           __entry->seqno = xe_sched_job_seqno(job);
                           __entry->guc_id = job->q->guc->id;
                           __entry->guc_state =
                           atomic_read(&job->q->guc->state);
                           __entry->flags = job->q->flags;
                           __entry->error = job->fence->error;
                           __entry->fence = (unsigned long)job->fence;
                           __entry->batch_addr = (u64)job->batch_addr[0];
                           ),

                    TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
                              __entry->fence, __entry->seqno, __entry->guc_id,
                              __entry->batch_addr, __entry->guc_state,
                              __entry->flags, __entry->error)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
             TP_PROTO(struct xe_sched_job *job),
             TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
             TP_PROTO(struct xe_sched_job *job),
             TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
             TP_PROTO(struct xe_sched_job *job),
             TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
             TP_PROTO(struct xe_sched_job *job),
             TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
             TP_PROTO(struct xe_sched_job *job),
             TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
             TP_PROTO(struct xe_sched_job *job),
             TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
             TP_PROTO(struct xe_sched_job *job),
             TP_ARGS(job)
);

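/* Scheduler messages */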
DECLARE_EVENT_CLASS(xe_sched_msg,
                    TP_PROTO(struct xe_sched_msg *msg),
                    TP_ARGS(msg),

                    TP_STRUCT__entry(
                             __field(u32, opcode)
                             __field(u16, guc_id)
                             ),

                    TP_fast_assign(
                           __entry->opcode = msg->opcode;
                           __entry->guc_id =
                           ((struct xe_exec_queue *)msg->private_data)->guc->id;
                           ),

                    TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
                              __entry->opcode)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
             TP_PROTO(struct xe_sched_msg *msg),
             TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
             TP_PROTO(struct xe_sched_msg *msg),
             TP_ARGS(msg)
);

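/* Hardware fences */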
DECLARE_EVENT_CLASS(xe_hw_fence,
                    TP_PROTO(struct xe_hw_fence *fence),
                    TP_ARGS(fence),

                    TP_STRUCT__entry(
                             __field(u64, ctx)
                             __field(u32, seqno)
                             __field(u64, fence)
                             ),

                    TP_fast_assign(
                           __entry->ctx = fence->dma.context;
                           __entry->seqno = fence->dma.seqno;
                           __entry->fence = (unsigned long)fence;
                           ),

                    TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
                              __entry->ctx, __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
             TP_PROTO(struct xe_hw_fence *fence),
             TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
             TP_PROTO(struct xe_hw_fence *fence),
             TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
             TP_PROTO(struct xe_hw_fence *fence),
             TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
             TP_PROTO(struct xe_hw_fence *fence),
             TP_ARGS(fence)
);

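/* Virtual memory areas (VMAs) */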
DECLARE_EVENT_CLASS(xe_vma,
                    TP_PROTO(struct xe_vma *vma),
                    TP_ARGS(vma),

                    TP_STRUCT__entry(
                             __field(u64, vma)
                             __field(u32, asid)
                             __field(u64, start)
                             __field(u64, end)
                             __field(u64, ptr)
                             ),

                    TP_fast_assign(
                           __entry->vma = (unsigned long)vma;
                           __entry->asid = xe_vma_vm(vma)->usm.asid;
                           __entry->start = xe_vma_start(vma);
                           __entry->end = xe_vma_end(vma) - 1;
                           __entry->ptr = xe_vma_userptr(vma);
                           ),

                    TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx",
                              __entry->vma, __entry->asid, __entry->start,
                              __entry->end, __entry->ptr)
);

DEFINE_EVENT(xe_vma, xe_vma_flush,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pagefault,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_acc,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_fail,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_bind,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_unbind,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_evict,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
);

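/* Virtual memory (VM) */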
DECLARE_EVENT_CLASS(xe_vm,
                    TP_PROTO(struct xe_vm *vm),
                    TP_ARGS(vm),

                    TP_STRUCT__entry(
                             __field(u64, vm)
                             __field(u32, asid)
                             ),

                    TP_fast_assign(
                           __entry->vm = (unsigned long)vm;
                           __entry->asid = vm->usm.asid;
                           ),

                    TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
                              __entry->asid)
);

DEFINE_EVENT(xe_vm, xe_vm_kill,
             TP_PROTO(struct xe_vm *vm),
             TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_create,
             TP_PROTO(struct xe_vm *vm),
             TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_free,
             TP_PROTO(struct xe_vm *vm),
             TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
             TP_PROTO(struct xe_vm *vm),
             TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_restart,
             TP_PROTO(struct xe_vm *vm),
             TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
             TP_PROTO(struct xe_vm *vm),
             TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
             TP_PROTO(struct xe_vm *vm),
             TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
             TP_PROTO(struct xe_vm *vm),
             TP_ARGS(vm)
);

/* GuC */
DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
                    TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
                    TP_ARGS(_head, _tail, size, space, len),

                    TP_STRUCT__entry(
                             __field(u32, _head)
                             __field(u32, _tail)
                             __field(u32, size)
                             __field(u32, space)
                             __field(u32, len)
                             ),

                    TP_fast_assign(
                           __entry->_head = _head;
                           __entry->_tail = _tail;
                           __entry->size = size;
                           __entry->space = space;
                           __entry->len = len;
                           ),

                    TP_printk("h2g flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
                              __entry->_head, __entry->_tail, __entry->size,
                              __entry->space, __entry->len)
);

DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_h2g_flow_control,
             TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
             TP_ARGS(_head, _tail, size, space, len)
);

DEFINE_EVENT_PRINT(xe_guc_ct_flow_control, xe_guc_ct_g2h_flow_control,
                   TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
                   TP_ARGS(_head, _tail, size, space, len),

                   TP_printk("g2h flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
                             __entry->_head, __entry->_tail, __entry->size,
                             __entry->space, __entry->len)
);

DECLARE_EVENT_CLASS(xe_guc_ctb,
                    TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
                    TP_ARGS(gt_id, action, len, _head, tail),

                    TP_STRUCT__entry(
                                __field(u8, gt_id)
                                __field(u32, action)
                                __field(u32, len)
                                __field(u32, tail)
                                __field(u32, _head)
                    ),

                    TP_fast_assign(
                            __entry->gt_id = gt_id;
                            __entry->action = action;
                            __entry->len = len;
                            __entry->tail = tail;
                            __entry->_head = _head;
                    ),

                    TP_printk("gt%d: H2G CTB: action=0x%x, len=%d, tail=%d, head=%d\n",
                              __entry->gt_id, __entry->action, __entry->len,
                              __entry->tail, __entry->_head)
);

DEFINE_EVENT(xe_guc_ctb, xe_guc_ctb_h2g,
             TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
             TP_ARGS(gt_id, action, len, _head, tail)
);

DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
                   TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
                   TP_ARGS(gt_id, action, len, _head, tail),

                   TP_printk("gt%d: G2H CTB: action=0x%x, len=%d, tail=%d, head=%d\n",
                             __entry->gt_id, __entry->action, __entry->len,
                             __entry->tail, __entry->_head)
);

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace
#include <trace/define_trace.h>