1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5
6 #include "xe_guc_ct.h"
7
8 #include <linux/bitfield.h>
9 #include <linux/circ_buf.h>
10 #include <linux/delay.h>
11
12 #include <kunit/static_stub.h>
13
14 #include <drm/drm_managed.h>
15
16 #include "abi/guc_actions_abi.h"
17 #include "abi/guc_actions_sriov_abi.h"
18 #include "abi/guc_klvs_abi.h"
19 #include "xe_bo.h"
20 #include "xe_device.h"
21 #include "xe_gt.h"
22 #include "xe_gt_pagefault.h"
23 #include "xe_gt_printk.h"
24 #include "xe_gt_tlb_invalidation.h"
25 #include "xe_guc.h"
26 #include "xe_guc_relay.h"
27 #include "xe_guc_submit.h"
28 #include "xe_map.h"
29 #include "xe_pm.h"
30 #include "xe_trace.h"
31
32 /* Used when a CT send wants to block and / or receive data */
33 struct g2h_fence {
34         u32 *response_buffer;
35         u32 seqno;
36         u32 response_data;
37         u16 response_len;
38         u16 error;
39         u16 hint;
40         u16 reason;
41         bool retry;
42         bool fail;
43         bool done;
44 };
45
46 static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
47 {
48         g2h_fence->response_buffer = response_buffer;
49         g2h_fence->response_data = 0;
50         g2h_fence->response_len = 0;
51         g2h_fence->fail = false;
52         g2h_fence->retry = false;
53         g2h_fence->done = false;
54         g2h_fence->seqno = ~0x0;
55 }
56
57 static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
58 {
59         return g2h_fence->seqno == ~0x0;
60 }
61
62 static struct xe_guc *
63 ct_to_guc(struct xe_guc_ct *ct)
64 {
65         return container_of(ct, struct xe_guc, ct);
66 }
67
68 static struct xe_gt *
69 ct_to_gt(struct xe_guc_ct *ct)
70 {
71         return container_of(ct, struct xe_gt, uc.guc.ct);
72 }
73
74 static struct xe_device *
75 ct_to_xe(struct xe_guc_ct *ct)
76 {
77         return gt_to_xe(ct_to_gt(ct));
78 }
79
80 /**
81  * DOC: GuC CTB Blob
82  *
83  * We allocate a single blob to hold both CTB descriptors and buffers:
84  *
85  *      +--------+-----------------------------------------------+------+
86  *      | offset | contents                                      | size |
87  *      +========+===============================================+======+
88  *      | 0x0000 | H2G CTB Descriptor (send)                     |      |
89  *      +--------+-----------------------------------------------+  4K  |
90  *      | 0x0800 | G2H CTB Descriptor (g2h)                      |      |
91  *      +--------+-----------------------------------------------+------+
92  *      | 0x1000 | H2G CT Buffer (send)                          | n*4K |
93  *      |        |                                               |      |
94  *      +--------+-----------------------------------------------+------+
95  *      | 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
96  *      | + n*4K |                                               |      |
97  *      +--------+-----------------------------------------------+------+
98  *
99  * The size of each ``CT Buffer`` must be a multiple of 4K.
100  * We don't expect too many messages in flight at any time, unless we are
101  * using GuC submission. In that case each request requires a minimum of
102  * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
103  * is enough space to avoid backpressure on the driver. We increase the size
104  * of the receive buffer (relative to the send buffer) to ensure a G2H
105  * response has a landing spot.
106  */
107
108 #define CTB_DESC_SIZE           ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
109 #define CTB_H2G_BUFFER_SIZE     (SZ_4K)
110 #define CTB_G2H_BUFFER_SIZE     (4 * CTB_H2G_BUFFER_SIZE)
111 #define G2H_ROOM_BUFFER_SIZE    (CTB_G2H_BUFFER_SIZE / 4)
112
113 static size_t guc_ct_size(void)
114 {
115         return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
116                 CTB_G2H_BUFFER_SIZE;
117 }
118
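/*
 * For illustration, with the sizes defined above: CTB_DESC_SIZE = 2K (the
 * descriptor struct aligned up to SZ_2K), CTB_H2G_BUFFER_SIZE = 4K and
 * CTB_G2H_BUFFER_SIZE = 16K, so guc_ct_size() = 2 * 2K + 4K + 16K = 24K,
 * i.e. n = 1 and m = 4 in the blob layout diagram above.
 */
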
119 static void guc_ct_fini(struct drm_device *drm, void *arg)
120 {
121         struct xe_guc_ct *ct = arg;
122
123         xa_destroy(&ct->fence_lookup);
124 }
125
126 static void g2h_worker_func(struct work_struct *w);
127
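/*
 * Prime lockdep by simulating a lock acquisition under memory reclaim: this
 * records that ct->lock may be taken in the reclaim path, so a later
 * allocation made while holding ct->lock is flagged as a potential deadlock
 * instead of being discovered at runtime.
 */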
128 static void primelockdep(struct xe_guc_ct *ct)
129 {
130         if (!IS_ENABLED(CONFIG_LOCKDEP))
131                 return;
132
133         fs_reclaim_acquire(GFP_KERNEL);
134         might_lock(&ct->lock);
135         fs_reclaim_release(GFP_KERNEL);
136 }
137
138 int xe_guc_ct_init(struct xe_guc_ct *ct)
139 {
140         struct xe_device *xe = ct_to_xe(ct);
141         struct xe_gt *gt = ct_to_gt(ct);
142         struct xe_tile *tile = gt_to_tile(gt);
143         struct xe_bo *bo;
144         int err;
145
146         xe_assert(xe, !(guc_ct_size() % PAGE_SIZE));
147
148         drmm_mutex_init(&xe->drm, &ct->lock);
149         spin_lock_init(&ct->fast_lock);
150         xa_init(&ct->fence_lookup);
151         INIT_WORK(&ct->g2h_worker, g2h_worker_func);
152         init_waitqueue_head(&ct->wq);
153         init_waitqueue_head(&ct->g2h_fence_wq);
154
155         primelockdep(ct);
156
157         bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
158                                           XE_BO_CREATE_SYSTEM_BIT |
159                                           XE_BO_CREATE_GGTT_BIT);
160         if (IS_ERR(bo))
161                 return PTR_ERR(bo);
162
163         ct->bo = bo;
164
165         err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
166         if (err)
167                 return err;
168
169         xe_assert(xe, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
170         ct->state = XE_GUC_CT_STATE_DISABLED;
171         return 0;
172 }
173
174 #define desc_read(xe_, guc_ctb__, field_)                       \
175         xe_map_rd_field(xe_, &guc_ctb__->desc, 0,               \
176                         struct guc_ct_buffer_desc, field_)
177
178 #define desc_write(xe_, guc_ctb__, field_, val_)                \
179         xe_map_wr_field(xe_, &guc_ctb__->desc, 0,               \
180                         struct guc_ct_buffer_desc, field_, val_)
181
182 static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
183                                 struct iosys_map *map)
184 {
185         h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
186         h2g->info.resv_space = 0;
187         h2g->info.tail = 0;
188         h2g->info.head = 0;
189         h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
190                                      h2g->info.size) -
191                           h2g->info.resv_space;
192         h2g->info.broken = false;
193
194         h2g->desc = *map;
195         xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
196
197         h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
198 }
199
200 static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
201                                 struct iosys_map *map)
202 {
203         g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
204         g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
205         g2h->info.head = 0;
206         g2h->info.tail = 0;
207         g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
208                                      g2h->info.size) -
209                           g2h->info.resv_space;
210         g2h->info.broken = false;
211
212         g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
213         xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
214
215         g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
216                                             CTB_H2G_BUFFER_SIZE);
217 }
218
219 static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
220 {
221         struct xe_guc *guc = ct_to_guc(ct);
222         u32 desc_addr, ctb_addr, size;
223         int err;
224
225         desc_addr = xe_bo_ggtt_addr(ct->bo);
226         ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
227         size = ct->ctbs.h2g.info.size * sizeof(u32);
228
229         err = xe_guc_self_cfg64(guc,
230                                 GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
231                                 desc_addr);
232         if (err)
233                 return err;
234
235         err = xe_guc_self_cfg64(guc,
236                                 GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
237                                 ctb_addr);
238         if (err)
239                 return err;
240
241         return xe_guc_self_cfg32(guc,
242                                  GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
243                                  size);
244 }
245
246 static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
247 {
248         struct xe_guc *guc = ct_to_guc(ct);
249         u32 desc_addr, ctb_addr, size;
250         int err;
251
252         desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
253         ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
254                 CTB_H2G_BUFFER_SIZE;
255         size = ct->ctbs.g2h.info.size * sizeof(u32);
256
257         err = xe_guc_self_cfg64(guc,
258                                 GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
259                                 desc_addr);
260         if (err)
261                 return err;
262
263         err = xe_guc_self_cfg64(guc,
264                                 GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
265                                 ctb_addr);
266         if (err)
267                 return err;
268
269         return xe_guc_self_cfg32(guc,
270                                  GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
271                                  size);
272 }
273
274 static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
275 {
276         u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
277                 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
278                 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
279                 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
280                            GUC_ACTION_HOST2GUC_CONTROL_CTB),
281                 FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
282                            enable ? GUC_CTB_CONTROL_ENABLE :
283                            GUC_CTB_CONTROL_DISABLE),
284         };
285         int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
286
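        /*
         * xe_guc_mmio_send() returns response data on success; this action is
         * expected to return no data, so treat any data (ret > 0) as a
         * protocol error.
         */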
287         return ret > 0 ? -EPROTO : ret;
288 }
289
290 static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
291                                 enum xe_guc_ct_state state)
292 {
293         mutex_lock(&ct->lock);          /* Serialise dequeue_one_g2h() */
294         spin_lock_irq(&ct->fast_lock);  /* Serialise CT fast-path */
295
296         xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
297                      state == XE_GUC_CT_STATE_STOPPED);
298
299         ct->g2h_outstanding = 0;
300         ct->state = state;
301
302         spin_unlock_irq(&ct->fast_lock);
303
304         /*
305          * Lockdep doesn't like this under the fast lock, and the destroy only
306          * needs to be serialised with the send path, which the CT lock provides.
307          */
308         xa_destroy(&ct->fence_lookup);
309
310         mutex_unlock(&ct->lock);
311 }
312
313 int xe_guc_ct_enable(struct xe_guc_ct *ct)
314 {
315         struct xe_device *xe = ct_to_xe(ct);
316         int err;
317
318         xe_assert(xe, !xe_guc_ct_enabled(ct));
319
320         guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
321         guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
322
323         err = guc_ct_ctb_h2g_register(ct);
324         if (err)
325                 goto err_out;
326
327         err = guc_ct_ctb_g2h_register(ct);
328         if (err)
329                 goto err_out;
330
331         err = guc_ct_control_toggle(ct, true);
332         if (err)
333                 goto err_out;
334
335         xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);
336
337         smp_mb();
338         wake_up_all(&ct->wq);
339         drm_dbg(&xe->drm, "GuC CT communication channel enabled\n");
340
341         return 0;
342
343 err_out:
344         drm_err(&xe->drm, "Failed to enable CT (%d)\n", err);
345
346         return err;
347 }
348
349 static void stop_g2h_handler(struct xe_guc_ct *ct)
350 {
351         cancel_work_sync(&ct->g2h_worker);
352 }
353
354 /**
355  * xe_guc_ct_disable - Set GuC to disabled state
356  * @ct: the &xe_guc_ct
357  *
358  * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
359  * in this transition.
360  */
361 void xe_guc_ct_disable(struct xe_guc_ct *ct)
362 {
363         xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
364         stop_g2h_handler(ct);
365 }
366
367 /**
368  * xe_guc_ct_stop - Set GuC to stopped state
369  * @ct: the &xe_guc_ct
370  *
371  * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
372  */
373 void xe_guc_ct_stop(struct xe_guc_ct *ct)
374 {
375         xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
376         stop_g2h_handler(ct);
377 }
378
379 static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
380 {
381         struct guc_ctb *h2g = &ct->ctbs.h2g;
382
383         lockdep_assert_held(&ct->lock);
384
385         if (cmd_len > h2g->info.space) {
386                 h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
387                 h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
388                                              h2g->info.size) -
389                                   h2g->info.resv_space;
390                 if (cmd_len > h2g->info.space)
391                         return false;
392         }
393
394         return true;
395 }
396
397 static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
398 {
399         if (!g2h_len)
400                 return true;
401
402         lockdep_assert_held(&ct->fast_lock);
403
404         return ct->ctbs.g2h.info.space > g2h_len;
405 }
406
407 static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
408 {
409         lockdep_assert_held(&ct->lock);
410
411         if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
412                 return -EBUSY;
413
414         return 0;
415 }
416
417 static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
418 {
419         lockdep_assert_held(&ct->lock);
420         ct->ctbs.h2g.info.space -= cmd_len;
421 }
422
423 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
424 {
425         xe_assert(ct_to_xe(ct), g2h_len <= ct->ctbs.g2h.info.space);
426
427         if (g2h_len) {
428                 lockdep_assert_held(&ct->fast_lock);
429
430                 ct->ctbs.g2h.info.space -= g2h_len;
431                 ct->g2h_outstanding += num_g2h;
432         }
433 }
434
435 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
436 {
437         lockdep_assert_held(&ct->fast_lock);
438         xe_assert(ct_to_xe(ct), ct->ctbs.g2h.info.space + g2h_len <=
439                   ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
440
441         ct->ctbs.g2h.info.space += g2h_len;
442         --ct->g2h_outstanding;
443 }
444
445 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
446 {
447         spin_lock_irq(&ct->fast_lock);
448         __g2h_release_space(ct, g2h_len);
449         spin_unlock_irq(&ct->fast_lock);
450 }
451
452 #define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
453
454 static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
455                      u32 ct_fence_value, bool want_response)
456 {
457         struct xe_device *xe = ct_to_xe(ct);
458         struct guc_ctb *h2g = &ct->ctbs.h2g;
459         u32 cmd[H2G_CT_HEADERS];
460         u32 tail = h2g->info.tail;
461         u32 full_len;
462         struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
463                                                          tail * sizeof(u32));
464
465         full_len = len + GUC_CTB_HDR_LEN;
466
467         lockdep_assert_held(&ct->lock);
468         xe_assert(xe, full_len <= GUC_CTB_MSG_MAX_LEN);
469         xe_assert(xe, tail <= h2g->info.size);
470
471         /* Command will wrap: zero-fill (NOPs), return and check credits again */
472         if (tail + full_len > h2g->info.size) {
473                 xe_map_memset(xe, &map, 0, 0,
474                               (h2g->info.size - tail) * sizeof(u32));
475                 h2g_reserve_space(ct, (h2g->info.size - tail));
476                 h2g->info.tail = 0;
477                 desc_write(xe, h2g, tail, h2g->info.tail);
478
479                 return -EAGAIN;
480         }
481
482         /*
483          * dw0: CT header (including fence)
484          * dw1: HXG header (including action code)
485          * dw2+: action data
486          */
487         cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
488                 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
489                 FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
490         if (want_response) {
491                 cmd[1] =
492                         FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
493                         FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
494                                    GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
495         } else {
496                 cmd[1] =
497                         FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
498                         FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
499                                    GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
500         }
501
502         /* H2G header in cmd[1] replaces action[0] so: */
503         --len;
504         ++action;
505
506         /* Write H2G, ensuring it is visible before the descriptor update */
507         xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
508         xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
509         xe_device_wmb(xe);
510
511         /* Update local copies */
512         h2g->info.tail = (tail + full_len) % h2g->info.size;
513         h2g_reserve_space(ct, full_len);
514
515         /* Update descriptor */
516         desc_write(xe, h2g, tail, h2g->info.tail);
517
518         trace_xe_guc_ctb_h2g(ct_to_gt(ct)->info.id, *(action - 1), full_len,
519                              desc_read(xe, h2g, head), h2g->info.tail);
520
521         return 0;
522 }
523
524 /*
525  * The CT protocol accepts a 16-bit fence. This field is fully owned by the
526  * driver, the GuC will just copy it to the reply message. Since we need to
527  * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
528  * we use one bit of the seqno as an indicator for that and a rolling counter
529  * for the remaining 15 bits.
530  */
531 #define CT_SEQNO_MASK GENMASK(14, 0)
532 #define CT_SEQNO_UNTRACKED BIT(15)
533 static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
534 {
535         u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
536
537         if (!is_g2h_fence)
538                 seqno |= CT_SEQNO_UNTRACKED;
539
540         return seqno;
541 }
542
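/*
 * For example (illustrative values): with fence_seqno at 42, a tracked send
 * (is_g2h_fence = true) gets seqno 0x002a, while an untracked FAST_REQUEST
 * gets 0x802a: the same rolling counter, but with CT_SEQNO_UNTRACKED set so
 * parse_g2h_response() can tell the two apart.
 */
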
543 static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
544                                 u32 len, u32 g2h_len, u32 num_g2h,
545                                 struct g2h_fence *g2h_fence)
546 {
547         struct xe_device *xe = ct_to_xe(ct);
548         u16 seqno;
549         int ret;
550
551         xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
552         xe_assert(xe, !g2h_len || !g2h_fence);
553         xe_assert(xe, !num_g2h || !g2h_fence);
554         xe_assert(xe, !g2h_len || num_g2h);
555         xe_assert(xe, g2h_len || !num_g2h);
556         lockdep_assert_held(&ct->lock);
557
558         if (unlikely(ct->ctbs.h2g.info.broken)) {
559                 ret = -EPIPE;
560                 goto out;
561         }
562
563         if (ct->state == XE_GUC_CT_STATE_DISABLED) {
564                 ret = -ENODEV;
565                 goto out;
566         }
567
568         if (ct->state == XE_GUC_CT_STATE_STOPPED) {
569                 ret = -ECANCELED;
570                 goto out;
571         }
572
573         xe_assert(xe, xe_guc_ct_enabled(ct));
574
575         if (g2h_fence) {
576                 g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
577                 num_g2h = 1;
578
579                 if (g2h_fence_needs_alloc(g2h_fence)) {
580                         void *ptr;
581
582                         g2h_fence->seqno = next_ct_seqno(ct, true);
583                         ptr = xa_store(&ct->fence_lookup,
584                                        g2h_fence->seqno,
585                                        g2h_fence, GFP_ATOMIC);
586                         if (IS_ERR(ptr)) {
587                                 ret = PTR_ERR(ptr);
588                                 goto out;
589                         }
590                 }
591
592                 seqno = g2h_fence->seqno;
593         } else {
594                 seqno = next_ct_seqno(ct, false);
595         }
596
597         if (g2h_len)
598                 spin_lock_irq(&ct->fast_lock);
599 retry:
600         ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
601         if (unlikely(ret))
602                 goto out_unlock;
603
604         ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
605         if (unlikely(ret)) {
606                 if (ret == -EAGAIN)
607                         goto retry;
608                 goto out_unlock;
609         }
610
611         __g2h_reserve_space(ct, g2h_len, num_g2h);
612         xe_guc_notify(ct_to_guc(ct));
613 out_unlock:
614         if (g2h_len)
615                 spin_unlock_irq(&ct->fast_lock);
616 out:
617         return ret;
618 }
619
620 static void kick_reset(struct xe_guc_ct *ct)
621 {
622         xe_gt_reset_async(ct_to_gt(ct));
623 }
624
625 static int dequeue_one_g2h(struct xe_guc_ct *ct);
626
627 static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
628                               u32 g2h_len, u32 num_g2h,
629                               struct g2h_fence *g2h_fence)
630 {
631         struct drm_device *drm = &ct_to_xe(ct)->drm;
632         struct drm_printer p = drm_info_printer(drm->dev);
633         unsigned int sleep_period_ms = 1;
634         int ret;
635
636         xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
637         lockdep_assert_held(&ct->lock);
638         xe_device_assert_mem_access(ct_to_xe(ct));
639
640 try_again:
641         ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
642                                    g2h_fence);
643
644         /*
645          * We wait to try to restore credits for about 1 second before bailing.
646          * In the case of H2G credits we have no choice but to wait for the
647          * GuC to consume H2Gs in the channel, so we use a wait / sleep loop. In
648          * the case of G2H we process any G2H in the channel, hopefully freeing
649          * credits as we consume the G2H messages.
650          */
651         if (unlikely(ret == -EBUSY &&
652                      !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
653                 struct guc_ctb *h2g = &ct->ctbs.h2g;
654
655                 if (sleep_period_ms == 1024)
656                         goto broken;
657
658                 trace_xe_guc_ct_h2g_flow_control(h2g->info.head, h2g->info.tail,
659                                                  h2g->info.size,
660                                                  h2g->info.space,
661                                                  len + GUC_CTB_HDR_LEN);
662                 msleep(sleep_period_ms);
663                 sleep_period_ms <<= 1;
664
665                 goto try_again;
666         } else if (unlikely(ret == -EBUSY)) {
667                 struct xe_device *xe = ct_to_xe(ct);
668                 struct guc_ctb *g2h = &ct->ctbs.g2h;
669
670                 trace_xe_guc_ct_g2h_flow_control(g2h->info.head,
671                                                  desc_read(xe, g2h, tail),
672                                                  g2h->info.size,
673                                                  g2h->info.space,
674                                                  g2h_fence ?
675                                                  GUC_CTB_HXG_MSG_MAX_LEN :
676                                                  g2h_len);
677
678 #define g2h_avail(ct)   \
679         (desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
680                 if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
681                                         g2h_avail(ct), HZ))
682                         goto broken;
683 #undef g2h_avail
684
685                 if (dequeue_one_g2h(ct) < 0)
686                         goto broken;
687
688                 goto try_again;
689         }
690
691         return ret;
692
693 broken:
694         drm_err(drm, "No forward progress on H2G, reset required");
695         xe_guc_ct_print(ct, &p, true);
696         ct->ctbs.h2g.info.broken = true;
697
698         return -EDEADLK;
699 }
700
701 static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
702                        u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
703 {
704         int ret;
705
706         xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
707
708         mutex_lock(&ct->lock);
709         ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
710         mutex_unlock(&ct->lock);
711
712         return ret;
713 }
714
715 int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
716                    u32 g2h_len, u32 num_g2h)
717 {
718         int ret;
719
720         ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
721         if (ret == -EDEADLK)
722                 kick_reset(ct);
723
724         return ret;
725 }
726
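/*
 * Illustrative usage of xe_guc_ct_send() (the action and credit values are
 * hypothetical): a non-blocking send expecting a single G2H completion event
 * of up to 4 dwords:
 *
 *      u32 action[] = { XE_GUC_ACTION_FOO, arg };
 *
 *      ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 4, 1);
 *
 * The G2H credits reserved here (g2h_len / num_g2h) are released again when
 * the matching event is parsed, e.g. via g2h_release_space().
 */
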
727 int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
728                           u32 g2h_len, u32 num_g2h)
729 {
730         int ret;
731
732         ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
733         if (ret == -EDEADLK)
734                 kick_reset(ct);
735
736         return ret;
737 }
738
739 int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
740 {
741         int ret;
742
743         lockdep_assert_held(&ct->lock);
744
745         ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
746         if (ret == -EDEADLK)
747                 kick_reset(ct);
748
749         return ret;
750 }
751
752 /*
753  * Check if a GT reset is in progress or will occur and if GT reset brought the
754  * CT back up. Arbitrarily picking 5 seconds as an upper limit for a GT reset.
755  */
756 static bool retry_failure(struct xe_guc_ct *ct, int ret)
757 {
758         if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
759                 return false;
760
761 #define ct_alive(ct)    \
762         (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
763          !ct->ctbs.g2h.info.broken)
764         if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct),  HZ * 5))
765                 return false;
766 #undef ct_alive
767
768         return true;
769 }
770
771 static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
772                             u32 *response_buffer, bool no_fail)
773 {
774         struct xe_device *xe = ct_to_xe(ct);
775         struct g2h_fence g2h_fence;
776         int ret = 0;
777
778         /*
779          * We use a fence to implement blocking sends / receiving response data.
780          * The seqno of the fence is sent in the H2G, returned in the G2H, and
781          * an xarray is used as the storage medium, with the seqno as the key.
782          * Fields in the fence hold success, failure, retry status and the
783          * response data. Safe to allocate on the stack as the xarray is the
784          * only reference and it cannot be present after this function exits.
785          */
786 retry:
787         g2h_fence_init(&g2h_fence, response_buffer);
788 retry_same_fence:
789         ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
790         if (unlikely(ret == -ENOMEM)) {
791                 void *ptr;
792
793                 /* Retry the allocation with GFP_KERNEL */
794                 ptr = xa_store(&ct->fence_lookup,
795                                g2h_fence.seqno,
796                                &g2h_fence, GFP_KERNEL);
797                 if (IS_ERR(ptr))
798                         return PTR_ERR(ptr);
799
800                 goto retry_same_fence;
801         } else if (unlikely(ret)) {
802                 if (ret == -EDEADLK)
803                         kick_reset(ct);
804
805                 if (no_fail && retry_failure(ct, ret))
806                         goto retry_same_fence;
807
808                 if (!g2h_fence_needs_alloc(&g2h_fence))
809                         xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
810
811                 return ret;
812         }
813
814         ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
815         if (!ret) {
816                 drm_err(&xe->drm, "Timed out waiting for G2H, fence %u, action %04x",
817                         g2h_fence.seqno, action[0]);
818                 xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
819                 return -ETIME;
820         }
821
822         if (g2h_fence.retry) {
823                 drm_warn(&xe->drm, "Send retry, action 0x%04x, reason %d",
824                          action[0], g2h_fence.reason);
825                 goto retry;
826         }
827         if (g2h_fence.fail) {
828                 drm_err(&xe->drm, "Send failed, action 0x%04x, error %d, hint %d",
829                         action[0], g2h_fence.error, g2h_fence.hint);
830                 ret = -EIO;
831         }
832
833         return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
834 }
835
836 /**
837  * xe_guc_ct_send_recv - Send and receive HXG to the GuC
838  * @ct: the &xe_guc_ct
839  * @action: the dword array with `HXG Request`_ message (can't be NULL)
840  * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
841  * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
842  *
843  * Send a `HXG Request`_ message to the GuC over the CT communication channel
844  * and block until the GuC replies with a `HXG Response`_ message.
845  *
846  * For non-blocking communication with GuC use xe_guc_ct_send().
847  *
848  * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
849  *
850  * Return: response length (in dwords) if &response_buffer was not NULL, or
851  *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
852  *         a negative error code on failure.
853  */
854 int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
855                         u32 *response_buffer)
856 {
857         KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
858         return guc_ct_send_recv(ct, action, len, response_buffer, false);
859 }
860
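/*
 * Minimal usage sketch (XE_GUC_ACTION_QUERY_FOO is hypothetical): a blocking
 * query with no response buffer returns DATA0 of the `HXG Response`_ on
 * success:
 *
 *      u32 action[] = { XE_GUC_ACTION_QUERY_FOO, arg };
 *      int val = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *
 * A negative val is an error code; otherwise val holds DATA0 from the reply.
 */
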
861 int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
862                                 u32 len, u32 *response_buffer)
863 {
864         return guc_ct_send_recv(ct, action, len, response_buffer, true);
865 }
866
867 static u32 *msg_to_hxg(u32 *msg)
868 {
869         return msg + GUC_CTB_MSG_MIN_LEN;
870 }
871
872 static u32 msg_len_to_hxg_len(u32 len)
873 {
874         return len - GUC_CTB_MSG_MIN_LEN;
875 }
876
877 static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
878 {
879         u32 *hxg = msg_to_hxg(msg);
880         u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
881
882         lockdep_assert_held(&ct->lock);
883
884         switch (action) {
885         case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
886         case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
887         case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
888         case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
889                 g2h_release_space(ct, len);
890         }
891
892         return 0;
893 }
894
895 static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
896 {
897         struct xe_gt *gt = ct_to_gt(ct);
898         struct xe_device *xe = gt_to_xe(gt);
899         u32 *hxg = msg_to_hxg(msg);
900         u32 hxg_len = msg_len_to_hxg_len(len);
901         u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
902         u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
903         struct g2h_fence *g2h_fence;
904
905         lockdep_assert_held(&ct->lock);
906
907         /*
908          * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
909          * Those messages should never fail, so if we do get an error back it
910          * means we're likely doing an illegal operation and the GuC is
911          * rejecting it. We have no way to inform the code that submitted the
912          * H2G that the message was rejected, so we need to escalate the
913          * failure to trigger a reset.
914          */
915         if (fence & CT_SEQNO_UNTRACKED) {
916                 if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
917                         xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
918                                   fence,
919                                   FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
920                                   FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
921                 else
922                         xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
923                                   type, fence);
924
925                 return -EPROTO;
926         }
927
928         g2h_fence = xa_erase(&ct->fence_lookup, fence);
929         if (unlikely(!g2h_fence)) {
930                 /* Don't tear down channel, as send could've timed out */
931                 xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
932                 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
933                 return 0;
934         }
935
936         xe_assert(xe, fence == g2h_fence->seqno);
937
938         if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
939                 g2h_fence->fail = true;
940                 g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
941                 g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
942         } else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
943                 g2h_fence->retry = true;
944                 g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
945         } else if (g2h_fence->response_buffer) {
946                 g2h_fence->response_len = hxg_len;
947                 memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
948         } else {
949                 g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
950         }
951
952         g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
953
954         g2h_fence->done = true;
955         smp_mb();
956
957         wake_up_all(&ct->g2h_fence_wq);
958
959         return 0;
960 }
961
962 static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
963 {
964         struct xe_device *xe = ct_to_xe(ct);
965         u32 *hxg = msg_to_hxg(msg);
966         u32 origin, type;
967         int ret;
968
969         lockdep_assert_held(&ct->lock);
970
971         origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
972         if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
973                 drm_err(&xe->drm,
974                         "G2H channel broken on read, origin=%d, reset required\n",
975                         origin);
976                 ct->ctbs.g2h.info.broken = true;
977
978                 return -EPROTO;
979         }
980
981         type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
982         switch (type) {
983         case GUC_HXG_TYPE_EVENT:
984                 ret = parse_g2h_event(ct, msg, len);
985                 break;
986         case GUC_HXG_TYPE_RESPONSE_SUCCESS:
987         case GUC_HXG_TYPE_RESPONSE_FAILURE:
988         case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
989                 ret = parse_g2h_response(ct, msg, len);
990                 break;
991         default:
992                 drm_err(&xe->drm,
993                         "G2H channel broken on read, type=%d, reset required\n",
994                         type);
995                 ct->ctbs.g2h.info.broken = true;
996
997                 ret = -EOPNOTSUPP;
998         }
999
1000         return ret;
1001 }
1002
1003 static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1004 {
1005         struct xe_device *xe = ct_to_xe(ct);
1006         struct xe_guc *guc = ct_to_guc(ct);
1007         u32 hxg_len = msg_len_to_hxg_len(len);
1008         u32 *hxg = msg_to_hxg(msg);
1009         u32 action, adj_len;
1010         u32 *payload;
1011         int ret = 0;
1012
1013         if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1014                 return 0;
1015
1016         action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1017         payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
1018         adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
1019
1020         switch (action) {
1021         case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1022                 ret = xe_guc_sched_done_handler(guc, payload, adj_len);
1023                 break;
1024         case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1025                 ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
1026                 break;
1027         case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
1028                 ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
1029                 break;
1030         case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
1031                 ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
1032                                                               adj_len);
1033                 break;
1034         case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1035                 /* Selftest only at the moment */
1036                 break;
1037         case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
1038         case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
1039                 /* FIXME: Handle this */
1040                 break;
1041         case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
1042                 ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
1043                                                                  adj_len);
1044                 break;
1045         case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1046                 ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1047                 break;
1048         case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1049                 ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
1050                                                            adj_len);
1051                 break;
1052         case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
1053                 ret = xe_guc_access_counter_notify_handler(guc, payload,
1054                                                            adj_len);
1055                 break;
1056         case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
1057                 ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
1058                 break;
1059         case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
1060                 ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
1061                 break;
1062         default:
1063                 drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
1064         }
1065
1066         if (ret)
1067                 drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
1068                         action, ret);
1069
1070         return 0;
1071 }
1072
1073 static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
1074 {
1075         struct xe_device *xe = ct_to_xe(ct);
1076         struct guc_ctb *g2h = &ct->ctbs.g2h;
1077         u32 tail, head, len;
1078         s32 avail;
1079         u32 action;
1080         u32 *hxg;
1081
1082         xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
1083         lockdep_assert_held(&ct->fast_lock);
1084
1085         if (ct->state == XE_GUC_CT_STATE_DISABLED)
1086                 return -ENODEV;
1087
1088         if (ct->state == XE_GUC_CT_STATE_STOPPED)
1089                 return -ECANCELED;
1090
1091         if (g2h->info.broken)
1092                 return -EPIPE;
1093
1094         xe_assert(xe, xe_guc_ct_enabled(ct));
1095
1096         /* Calculate DW available to read */
1097         tail = desc_read(xe, g2h, tail);
1098         avail = tail - g2h->info.head;
1099         if (unlikely(avail == 0))
1100                 return 0;
1101
1102         if (avail < 0)
1103                 avail += g2h->info.size;
1104
1105         /* Read header */
1106         xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
1107                            sizeof(u32));
1108         len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
1109         if (len > avail) {
1110                 drm_err(&xe->drm,
1111                         "G2H channel broken on read, avail=%d, len=%d, reset required\n",
1112                         avail, len);
1113                 g2h->info.broken = true;
1114
1115                 return -EPROTO;
1116         }
1117
1118         head = (g2h->info.head + 1) % g2h->info.size;
1119         avail = len - 1;
1120
1121         /* Read G2H message */
1122         if (avail + head > g2h->info.size) {
1123                 u32 avail_til_wrap = g2h->info.size - head;
1124
1125                 xe_map_memcpy_from(xe, msg + 1,
1126                                    &g2h->cmds, sizeof(u32) * head,
1127                                    avail_til_wrap * sizeof(u32));
1128                 xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
1129                                    &g2h->cmds, 0,
1130                                    (avail - avail_til_wrap) * sizeof(u32));
1131         } else {
1132                 xe_map_memcpy_from(xe, msg + 1,
1133                                    &g2h->cmds, sizeof(u32) * head,
1134                                    avail * sizeof(u32));
1135         }
1136
1137         hxg = msg_to_hxg(msg);
1138         action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1139
1140         if (fast_path) {
1141                 if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1142                         return 0;
1143
1144                 switch (action) {
1145                 case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1146                 case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1147                         break;  /* Process these in fast-path */
1148                 default:
1149                         return 0;
1150                 }
1151         }
1152
1153         /* Update local / descriptor header */
1154         g2h->info.head = (head + avail) % g2h->info.size;
1155         desc_write(xe, g2h, head, g2h->info.head);
1156
1157         trace_xe_guc_ctb_g2h(ct_to_gt(ct)->info.id, action, len,
1158                              g2h->info.head, tail);
1159
1160         return len;
1161 }
1162
1163 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
1164 {
1165         struct xe_device *xe = ct_to_xe(ct);
1166         struct xe_guc *guc = ct_to_guc(ct);
1167         u32 hxg_len = msg_len_to_hxg_len(len);
1168         u32 *hxg = msg_to_hxg(msg);
1169         u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1170         u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
1171         u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
1172         int ret = 0;
1173
1174         switch (action) {
1175         case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1176                 ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1177                 break;
1178         case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1179                 __g2h_release_space(ct, len);
1180                 ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
1181                                                            adj_len);
1182                 break;
1183         default:
1184                 drm_warn(&xe->drm, "NOT_POSSIBLE");
1185         }
1186
1187         if (ret)
1188                 drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
1189                         action, ret);
1190 }
1191
1192 /**
1193  * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
1194  * @ct: GuC CT object
1195  *
1196  * Anything related to page faults is critical for performance, so process these
1197  * critical G2H in the IRQ. This is safe as these handlers either just wake up
1198  * waiters or queue another worker.
1199  */
1200 void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
1201 {
1202         struct xe_device *xe = ct_to_xe(ct);
1203         bool ongoing;
1204         int len;
1205
1206         ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
1207         if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1208                 return;
1209
1210         spin_lock(&ct->fast_lock);
1211         do {
1212                 len = g2h_read(ct, ct->fast_msg, true);
1213                 if (len > 0)
1214                         g2h_fast_path(ct, ct->fast_msg, len);
1215         } while (len > 0);
1216         spin_unlock(&ct->fast_lock);
1217
1218         if (ongoing)
1219                 xe_device_mem_access_put(xe);
1220 }
1221
1222 /* Returns less than zero on error, 0 on done, 1 on more available */
1223 static int dequeue_one_g2h(struct xe_guc_ct *ct)
1224 {
1225         int len;
1226         int ret;
1227
1228         lockdep_assert_held(&ct->lock);
1229
1230         spin_lock_irq(&ct->fast_lock);
1231         len = g2h_read(ct, ct->msg, false);
1232         spin_unlock_irq(&ct->fast_lock);
1233         if (len <= 0)
1234                 return len;
1235
1236         ret = parse_g2h_msg(ct, ct->msg, len);
1237         if (unlikely(ret < 0))
1238                 return ret;
1239
1240         ret = process_g2h_msg(ct, ct->msg, len);
1241         if (unlikely(ret < 0))
1242                 return ret;
1243
1244         return 1;
1245 }
1246
1247 static void g2h_worker_func(struct work_struct *w)
1248 {
1249         struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
1250         bool ongoing;
1251         int ret;
1252
1253         /*
1254          * Normal users must always hold mem_access.ref around CT calls. However
1255          * during the runtime pm callbacks we rely on CT to talk to the GuC, but
1256          * at this stage we can't rely on mem_access.ref and even the
1257          * callback_task will be different than current.  For such cases we just
1258          * callback_task will be different from current.  For such cases we just
1259          * ct_send requests or where we otherwise expect some response when
1260          * initiated from those callbacks (which will need to wait for the below
1261          * dequeue_one_g2h()).  The dequeue_one_g2h() will gracefully fail if
1262          * the device has suspended to the point that the CT communication has
1263          * been disabled.
1264          *
1265          * If we are inside the runtime pm callback, we can be the only task
1266          * still issuing CT requests (since that requires having the
1267          * mem_access.ref).  It seems like it might in theory be possible to
1268          * receive unsolicited events from the GuC just as we are
1269          * suspending-resuming, but those will currently be lost anyway when
1270          * eventually exiting from suspend, hence no need to wake up the device
1271          * here. If we ever need something stronger than get_if_ongoing() then
1272          * we need to be careful not to block the pm callbacks from getting CT
1273          * responses: if the worker here is itself blocked on those callbacks
1274          * completing, we have a deadlock.
1275          */
1276         ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
1277         if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1278                 return;
1279
1280         do {
1281                 mutex_lock(&ct->lock);
1282                 ret = dequeue_one_g2h(ct);
1283                 mutex_unlock(&ct->lock);
1284
1285                 if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
1286                         struct drm_device *drm = &ct_to_xe(ct)->drm;
1287                         struct drm_printer p = drm_info_printer(drm->dev);
1288
1289                         xe_guc_ct_print(ct, &p, false);
1290                         kick_reset(ct);
1291                 }
1292         } while (ret == 1);
1293
1294         if (ongoing)
1295                 xe_device_mem_access_put(ct_to_xe(ct));
1296 }
1297
1298 static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
1299                                      struct guc_ctb_snapshot *snapshot,
1300                                      bool atomic)
1301 {
1302         u32 head, tail;
1303
1304         xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
1305                            sizeof(struct guc_ct_buffer_desc));
1306         memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
1307
1308         snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
1309                                        atomic ? GFP_ATOMIC : GFP_KERNEL);
1310
1311         if (!snapshot->cmds) {
1312                 drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
1313                 return;
1314         }
1315
1316         head = snapshot->desc.head;
1317         tail = snapshot->desc.tail;
1318
1319         if (head != tail) {
1320                 struct iosys_map map =
1321                         IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));
1322
1323                 while (head != tail) {
1324                         snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32);
1325                         ++head;
1326                         if (head == ctb->info.size) {
1327                                 head = 0;
1328                                 map = ctb->cmds;
1329                         } else {
1330                                 iosys_map_incr(&map, sizeof(u32));
1331                         }
1332                 }
1333         }
1334 }
1335
1336 static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
1337                                    struct drm_printer *p)
1338 {
1339         u32 head, tail;
1340
1341         drm_printf(p, "\tsize: %d\n", snapshot->info.size);
1342         drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
1343         drm_printf(p, "\thead: %d\n", snapshot->info.head);
1344         drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
1345         drm_printf(p, "\tspace: %d\n", snapshot->info.space);
1346         drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
1347         drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
1348         drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
1349         drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
1350
1351         if (!snapshot->cmds)
1352                 return;
1353
1354         head = snapshot->desc.head;
1355         tail = snapshot->desc.tail;
1356
1357         while (head != tail) {
1358                 drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
1359                            snapshot->cmds[head]);
1360                 ++head;
1361                 if (head == snapshot->info.size)
1362                         head = 0;
1363         }
1364 }
1365
1366 static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
1367 {
1368         kfree(snapshot->cmds);
1369 }
1370
1371 /**
1372  * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
1373  * @ct: GuC CT object.
1374  * @atomic: Boolean to indicate if this is called from an atomic context,
1375  * like reset or the CTB handler, or from some regular path, like debugfs.
1376  *
1377  * This can be printed out at a later stage, e.g. during dev_coredump
1378  * analysis.
1379  *
1380  * Returns: a GuC CT snapshot object that must be freed by the caller
1381  * by using `xe_guc_ct_snapshot_free`.
1382  */
1383 struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
1384                                                       bool atomic)
1385 {
1386         struct xe_device *xe = ct_to_xe(ct);
1387         struct xe_guc_ct_snapshot *snapshot;
1388
1389         snapshot = kzalloc(sizeof(*snapshot),
1390                            atomic ? GFP_ATOMIC : GFP_KERNEL);
1391
1392         if (!snapshot) {
1393                 drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
1394                 return NULL;
1395         }
1396
1397         if (xe_guc_ct_enabled(ct)) {
1398                 snapshot->ct_enabled = true;
1399                 snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
1400                 guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
1401                                          &snapshot->h2g, atomic);
1402                 guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
1403                                          &snapshot->g2h, atomic);
1404         }
1405
1406         return snapshot;
1407 }
1408
1409 /**
1410  * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
1411  * @snapshot: GuC CT snapshot object.
1412  * @p: drm_printer where it will be printed out.
1413  *
1414  * This function prints out a given GuC CT snapshot object.
1415  */
1416 void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
1417                               struct drm_printer *p)
1418 {
1419         if (!snapshot)
1420                 return;
1421
1422         if (snapshot->ct_enabled) {
1423                 drm_puts(p, "H2G CTB (all sizes in DW):\n");
1424                 guc_ctb_snapshot_print(&snapshot->h2g, p);
1425
1426                 drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
1427                 guc_ctb_snapshot_print(&snapshot->g2h, p);
1428
1429                 drm_printf(p, "\tg2h outstanding: %d\n",
1430                            snapshot->g2h_outstanding);
1431         } else {
1432                 drm_puts(p, "CT disabled\n");
1433         }
1434 }
1435
1436 /**
1437  * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
1438  * @snapshot: GuC CT snapshot object.
1439  *
1440  * This function frees all the memory that was allocated at capture
1441  * time.
1442  */
1443 void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
1444 {
1445         if (!snapshot)
1446                 return;
1447
1448         guc_ctb_snapshot_free(&snapshot->h2g);
1449         guc_ctb_snapshot_free(&snapshot->g2h);
1450         kfree(snapshot);
1451 }
1452
1453 /**
1454  * xe_guc_ct_print - GuC CT Print.
1455  * @ct: GuC CT.
1456  * @p: drm_printer where it will be printed out.
1457  * @atomic: Boolean to indicate if this is called from an atomic context,
1458  * like reset or the CTB handler, or from some regular path, like debugfs.
1459  *
1460  * This function quickly captures a snapshot and immediately prints it out.
1461  */
1462 void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
1463 {
1464         struct xe_guc_ct_snapshot *snapshot;
1465
1466         snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
1467         xe_guc_ct_snapshot_print(snapshot, p);
1468         xe_guc_ct_snapshot_free(snapshot);
1469 }