// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>

#include <kunit/static_stub.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_device.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
/* Used when a CT send wants to block and / or receive data */
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
	g2h_fence->response_buffer = response_buffer;
	g2h_fence->response_data = 0;
	g2h_fence->response_len = 0;
	g2h_fence->fail = false;
	g2h_fence->retry = false;
	g2h_fence->done = false;
	g2h_fence->seqno = ~0x0;
}
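
/*
 * Note: ~0 acts as a sentinel. g2h_fence_needs_alloc() below treats a fence
 * whose seqno is still all ones as not yet assigned a real seqno and not yet
 * inserted into ct->fence_lookup.
 */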
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{
	return g2h_fence->seqno == ~0x0;
}

static struct xe_guc *
ct_to_guc(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_guc, ct);
}

static struct xe_gt *
ct_to_gt(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_gt, uc.guc.ct);
}

static struct xe_device *
ct_to_xe(struct xe_guc_ct *ct)
{
	return gt_to_xe(ct_to_gt(ct));
}
/*
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 *      +--------+-----------------------------------------------+------+
 *      | offset |                   contents                    | size |
 *      +========+===============================================+======+
 *      | 0x0000 |  H2G CTB Descriptor (send)                    |      |
 *      +--------+-----------------------------------------------+  4K  |
 *      | 0x0800 |  G2H CTB Descriptor (g2h)                     |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 |  H2G CT Buffer (send)                         | n*4K |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 |  G2H CT Buffer (g2h)                          | m*4K |
 *      | + n*4K |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *
 * Size of each ``CT Buffer`` must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
 * is enough space to avoid backpressure on the driver. We increase the size
 * of the receive buffer (relative to the send) to ensure a G2H response CTB
 * has a landing spot.
 */
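
/*
 * Worked example of the sizing above (illustrative): a minimal request is
 * 2 dwords of payload on top of the 1-dword CTB header and 1-dword HXG
 * header, i.e. 4 dwords per message. A 4K H2G buffer holds 1024 dwords,
 * so 1024 / 4 = 256 requests fit before the driver sees backpressure.
 */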
#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)
static size_t guc_ct_size(void)
{
	return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
		CTB_G2H_BUFFER_SIZE;
}

static void guc_ct_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_ct *ct = arg;

	xa_destroy(&ct->fence_lookup);
}

static void g2h_worker_func(struct work_struct *w);

static void primelockdep(struct xe_guc_ct *ct)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ct->lock);
	fs_reclaim_release(GFP_KERNEL);
}
int xe_guc_ct_init(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_assert(xe, !(guc_ct_size() % PAGE_SIZE));

	drmm_mutex_init(&xe->drm, &ct->lock);
	spin_lock_init(&ct->fast_lock);
	xa_init(&ct->fence_lookup);
	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
	init_waitqueue_head(&ct->wq);
	init_waitqueue_head(&ct->g2h_fence_wq);

	primelockdep(ct);

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
					  XE_BO_CREATE_SYSTEM_BIT |
					  XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ct->bo = bo;

	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
	if (err)
		return err;

	xe_assert(xe, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
	ct->state = XE_GUC_CT_STATE_DISABLED;
	return 0;
}
#define desc_read(xe_, guc_ctb__, field_)			\
	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_)

#define desc_write(xe_, guc_ctb__, field_, val_)		\
	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_, val_)
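
/*
 * The CTB descriptors live in a GGTT-mapped buffer object rather than plain
 * kernel memory, so all accesses go through the xe_map helpers. For example,
 * refreshing the local copy of the GuC-owned head pointer looks like:
 *
 *	h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
 */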
static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
				struct iosys_map *map)
{
	h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
	h2g->info.resv_space = 0;
	h2g->info.tail = 0;
	h2g->info.head = 0;
	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
				     h2g->info.size) -
			  h2g->info.resv_space;
	h2g->info.broken = false;

	h2g->desc = *map;
	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
}

static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
				struct iosys_map *map)
{
	g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
	g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
	g2h->info.tail = 0;
	g2h->info.head = 0;
	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
				     g2h->info.size) -
			  g2h->info.resv_space;
	g2h->info.broken = false;

	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
					  CTB_H2G_BUFFER_SIZE);
}
static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo);
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
	size = ct->ctbs.h2g.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
		   CTB_H2G_BUFFER_SIZE;
	size = ct->ctbs.g2h.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
				 size);
}
static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
{
	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
			   enable ? GUC_CTB_CONTROL_ENABLE :
			   GUC_CTB_CONTROL_DISABLE),
	};
	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}
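
/*
 * Note the transport above: HOST2GUC_CONTROL_CTB is sent via
 * xe_guc_mmio_send() rather than over the CTB itself, since the channel
 * being enabled or disabled cannot carry its own control request.
 */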
static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
				enum xe_guc_ct_state state)
{
	mutex_lock(&ct->lock);		/* Serialise dequeue_one_g2h() */
	spin_lock_irq(&ct->fast_lock);	/* Serialise CT fast-path */

	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
		     state == XE_GUC_CT_STATE_STOPPED);

	ct->g2h_outstanding = 0;
	ct->state = state;

	spin_unlock_irq(&ct->fast_lock);

	/*
	 * Lockdep doesn't like this under the fast lock, and the destroy only
	 * needs to be serialized with the send path, which the CT lock
	 * provides.
	 */
	xa_destroy(&ct->fence_lookup);

	mutex_unlock(&ct->lock);
}
int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	int err;

	xe_assert(xe, !xe_guc_ct_enabled(ct));

	guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
	guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);

	err = guc_ct_ctb_h2g_register(ct);
	if (err)
		goto err_out;
	err = guc_ct_ctb_g2h_register(ct);
	if (err)
		goto err_out;
	err = guc_ct_control_toggle(ct, true);
	if (err)
		goto err_out;

	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);

	wake_up_all(&ct->wq);
	drm_dbg(&xe->drm, "GuC CT communication channel enabled\n");
	return 0;

err_out:
	drm_err(&xe->drm, "Failed to enable CT (%d)\n", err);
	return err;
}
static void stop_g2h_handler(struct xe_guc_ct *ct)
{
	cancel_work_sync(&ct->g2h_worker);
}

/**
 * xe_guc_ct_disable - Set GuC to disabled state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h
 * messages are expected in this transition.
 */
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
	stop_g2h_handler(ct);
}

/**
 * xe_guc_ct_stop - Set GuC to stopped state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding
 * g2h messages.
 */
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
	stop_g2h_handler(ct);
}
static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
{
	struct guc_ctb *h2g = &ct->ctbs.h2g;

	lockdep_assert_held(&ct->lock);

	if (cmd_len > h2g->info.space) {
		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
					     h2g->info.size) -
				  h2g->info.resv_space;
		if (cmd_len > h2g->info.space)
			return false;
	}

	return true;
}

static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
{
	if (!g2h_len)
		return true;

	lockdep_assert_held(&ct->fast_lock);

	return ct->ctbs.g2h.info.space > g2h_len;
}

static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
{
	lockdep_assert_held(&ct->lock);

	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
		return -EBUSY;

	return 0;
}

static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
{
	lockdep_assert_held(&ct->lock);
	ct->ctbs.h2g.info.space -= cmd_len;
}

static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
	xe_assert(ct_to_xe(ct), g2h_len <= ct->ctbs.g2h.info.space);

	if (g2h_len) {
		lockdep_assert_held(&ct->fast_lock);

		ct->ctbs.g2h.info.space -= g2h_len;
		ct->g2h_outstanding += num_g2h;
	}
}

static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	lockdep_assert_held(&ct->fast_lock);
	xe_assert(ct_to_xe(ct), ct->ctbs.g2h.info.space + g2h_len <=
		  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);

	ct->ctbs.g2h.info.space += g2h_len;
	--ct->g2h_outstanding;
}

static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
	spin_lock_irq(&ct->fast_lock);
	__g2h_release_space(ct, g2h_len);
	spin_unlock_irq(&ct->fast_lock);
}
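
/*
 * Credit model in brief: a send expecting num_g2h response messages reserves
 * g2h_len dwords of the receive ring up front under the fast lock; each
 * handled G2H hands its dwords back via g2h_release_space(), which also drops
 * g2h_outstanding by one. Our own requests can therefore never overrun the
 * shared G2H ring.
 */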
#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
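
/*
 * On-ring layout of a single H2G message (dwords), as assembled by
 * h2g_write() below:
 *
 *	msg[0]: CTB header (format, payload length in dwords, fence)
 *	msg[1]: HXG header (type, action code) - replaces action[0]
 *	msg[2..]: action[1..len - 1], the remaining payload
 */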
static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
		     u32 ct_fence_value, bool want_response)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct guc_ctb *h2g = &ct->ctbs.h2g;
	u32 cmd[H2G_CT_HEADERS];
	u32 tail = h2g->info.tail;
	u32 full_len;
	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
						     tail * sizeof(u32));

	full_len = len + GUC_CTB_HDR_LEN;

	lockdep_assert_held(&ct->lock);
	xe_assert(xe, full_len <= GUC_CTB_MSG_MAX_LEN);
	xe_assert(xe, tail <= h2g->info.size);

	/* Command will wrap, zero fill (NOPs), return and check credits again */
	if (tail + full_len > h2g->info.size) {
		xe_map_memset(xe, &map, 0, 0,
			      (h2g->info.size - tail) * sizeof(u32));
		h2g_reserve_space(ct, (h2g->info.size - tail));
		h2g->info.tail = 0;
		desc_write(xe, h2g, tail, h2g->info.tail);

		return -EAGAIN;
	}
	/*
	 * dw0: CT header (including fence)
	 * dw1: HXG header (including action code)
	 */
	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
		 FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
	if (want_response)
		cmd[1] = FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
			 FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				    GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	else
		cmd[1] = FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
			 FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				    GUC_HXG_EVENT_MSG_0_DATA0, action[0]);

	/* H2G header in cmd[1] replaces action[0], so the payload is action + 1 */

	/* Write H2G ensuring visible before descriptor update */
	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32),
			 action + 1, (len - 1) * sizeof(u32));

	/* Update local copies */
	h2g->info.tail = (tail + full_len) % h2g->info.size;
	h2g_reserve_space(ct, full_len);

	/* Update descriptor */
	desc_write(xe, h2g, tail, h2g->info.tail);

	trace_xe_guc_ctb_h2g(ct_to_gt(ct)->info.id, action[0], full_len,
			     desc_read(xe, h2g, head), h2g->info.tail);

	return 0;
}
/*
 * The CT protocol accepts a 16-bit fence. This field is fully owned by the
 * driver, the GuC will just copy it to the reply message. Since we need to
 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
 * we use one bit of the seqno as an indicator for that and a rolling counter
 * for the remaining 15 bits.
 */
#define CT_SEQNO_MASK GENMASK(14, 0)
#define CT_SEQNO_UNTRACKED BIT(15)
static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
{
	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;

	if (!is_g2h_fence)
		seqno |= CT_SEQNO_UNTRACKED;

	return seqno;
}
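
/*
 * Illustrative values: a tracked (fenced) send might carry seqno 0x0005,
 * while an equivalent FAST_REQUEST send would carry 0x8005, i.e.
 * CT_SEQNO_UNTRACKED | 0x0005. parse_g2h_response() keys off
 * CT_SEQNO_UNTRACKED to tell the two apart when a reply arrives.
 */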
static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 g2h_len, u32 num_g2h,
				struct g2h_fence *g2h_fence)
{
	struct xe_device *xe = ct_to_xe(ct);
	u16 seqno;
	int ret;

	xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
	xe_assert(xe, !g2h_len || !g2h_fence);
	xe_assert(xe, !num_g2h || !g2h_fence);
	xe_assert(xe, !g2h_len || num_g2h);
	xe_assert(xe, g2h_len || !num_g2h);
	lockdep_assert_held(&ct->lock);

	if (unlikely(ct->ctbs.h2g.info.broken)) {
		ret = -EPIPE;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
		ret = -ENODEV;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_STOPPED) {
		ret = -ECANCELED;
		goto out;
	}

	xe_assert(xe, xe_guc_ct_enabled(ct));

	if (g2h_fence) {
		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
		num_g2h = 1;

		if (g2h_fence_needs_alloc(g2h_fence)) {
			void *ptr;

			g2h_fence->seqno = next_ct_seqno(ct, true);
			ptr = xa_store(&ct->fence_lookup,
				       g2h_fence->seqno,
				       g2h_fence, GFP_ATOMIC);
			if (IS_ERR(ptr)) {
				ret = PTR_ERR(ptr);
				goto out;
			}
		}

		seqno = g2h_fence->seqno;
	} else {
		seqno = next_ct_seqno(ct, false);
	}

	spin_lock_irq(&ct->fast_lock);
retry:
	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
	if (unlikely(ret))
		goto out_unlock;

	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
	if (unlikely(ret)) {
		if (ret == -EAGAIN)
			goto retry;
		goto out_unlock;
	}

	__g2h_reserve_space(ct, g2h_len, num_g2h);
	xe_guc_notify(ct_to_guc(ct));
out_unlock:
	spin_unlock_irq(&ct->fast_lock);
out:
	return ret;
}
static void kick_reset(struct xe_guc_ct *ct)
{
	xe_gt_reset_async(ct_to_gt(ct));
}

static int dequeue_one_g2h(struct xe_guc_ct *ct);
static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			      u32 g2h_len, u32 num_g2h,
			      struct g2h_fence *g2h_fence)
{
	struct drm_device *drm = &ct_to_xe(ct)->drm;
	struct drm_printer p = drm_info_printer(drm->dev);
	unsigned int sleep_period_ms = 1;
	int ret;

	xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
	lockdep_assert_held(&ct->lock);
	xe_device_assert_mem_access(ct_to_xe(ct));

try_again:
	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
				   g2h_fence);

	/*
	 * We wait to try to restore credits for about 1 second before bailing.
	 * In the case of H2G credits we have no choice but to wait for the
	 * GuC to consume H2Gs in the channel, so we use a wait / sleep loop.
	 * In the case of G2H credits we process any G2H in the channel,
	 * hopefully freeing credits as we consume the G2H messages.
	 */
	if (unlikely(ret == -EBUSY &&
		     !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
		struct guc_ctb *h2g = &ct->ctbs.h2g;

		if (sleep_period_ms == 1024)
			goto broken;

		trace_xe_guc_ct_h2g_flow_control(h2g->info.head, h2g->info.tail,
						 h2g->info.size, h2g->info.space,
						 len + GUC_CTB_HDR_LEN);
		msleep(sleep_period_ms);
		sleep_period_ms <<= 1;

		goto try_again;
	} else if (unlikely(ret == -EBUSY)) {
		struct xe_device *xe = ct_to_xe(ct);
		struct guc_ctb *g2h = &ct->ctbs.g2h;

		trace_xe_guc_ct_g2h_flow_control(g2h->info.head,
						 desc_read(xe, g2h, tail),
						 g2h->info.size, g2h->info.space,
						 g2h_fence ?
						 GUC_CTB_HXG_MSG_MAX_LEN :
						 g2h_len);

#define g2h_avail(ct) \
	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
					g2h_avail(ct), HZ))
			goto broken;
#undef g2h_avail

		if (dequeue_one_g2h(ct) < 0)
			goto broken;

		goto try_again;
	}

	return ret;

broken:
	drm_err(drm, "No forward progress on H2G, reset required\n");
	xe_guc_ct_print(ct, &p, true);
	ct->ctbs.h2g.info.broken = true;

	return -EDEADLK;
}
static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
{
	int ret;

	xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);

	mutex_lock(&ct->lock);
	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
	mutex_unlock(&ct->lock);

	return ret;
}

int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		   u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			  u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
{
	int ret;

	lockdep_assert_held(&ct->lock);

	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}
/*
 * Check if a GT reset is in progress or will occur and if the GT reset brought
 * the CT back up. Arbitrarily picking 5 seconds as an upper limit for the GT
 * reset to complete.
 */
static bool retry_failure(struct xe_guc_ct *ct, int ret)
{
	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
		return false;

#define ct_alive(ct)	\
	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
	 !ct->ctbs.g2h.info.broken)
	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
		return false;
#undef ct_alive

	return true;
}
static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			    u32 *response_buffer, bool no_fail)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct g2h_fence g2h_fence;
	int ret;

	/*
	 * We use a fence to implement blocking sends / receiving response data.
	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
	 * an xarray is used as storage media with the seqno being the key.
	 * Fields in the fence hold success, failure, retry status and the
	 * response data. Safe to allocate on the stack as the xarray is the
	 * only reference and it cannot be present after this function exits.
	 */
retry:
	g2h_fence_init(&g2h_fence, response_buffer);
retry_same_fence:
	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
	if (unlikely(ret == -ENOMEM)) {
		void *ptr;

		/* Retry allocation with GFP_KERNEL */
		ptr = xa_store(&ct->fence_lookup,
			       g2h_fence.seqno,
			       &g2h_fence, GFP_KERNEL);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);

		goto retry_same_fence;
	} else if (unlikely(ret)) {
		if (ret == -EDEADLK)
			kick_reset(ct);

		if (no_fail && retry_failure(ct, ret))
			goto retry_same_fence;

		if (!g2h_fence_needs_alloc(&g2h_fence))
			xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);

		return ret;
	}

	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
	if (!ret) {
		drm_err(&xe->drm, "Timed out waiting for G2H, fence %u, action %04x",
			g2h_fence.seqno, action[0]);
		xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
		return -ETIME;
	}

	if (g2h_fence.retry) {
		drm_warn(&xe->drm, "Send retry, action 0x%04x, reason %d",
			 action[0], g2h_fence.reason);
		goto retry;
	}

	if (g2h_fence.fail) {
		drm_err(&xe->drm, "Send failed, action 0x%04x, error %d, hint %d",
			action[0], g2h_fence.error, g2h_fence.hint);
		ret = -EIO;
	}

	return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
}
/**
 * xe_guc_ct_send_recv - Send and receive HXG to the GuC
 * @ct: the &xe_guc_ct
 * @action: the dword array with `HXG Request`_ message (can't be NULL)
 * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
 * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
 *
 * Send a `HXG Request`_ message to the GuC over the CT communication channel
 * and block until the GuC replies with a `HXG Response`_ message.
 *
 * For non-blocking communication with GuC use xe_guc_ct_send().
 *
 * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
 *
 * Return: response length (in dwords) if &response_buffer was not NULL, or
 *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
 *         a negative error code on failure.
 */
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			u32 *response_buffer)
{
	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
	return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
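
/*
 * Usage sketch (illustrative only, not a caller from this driver): a blocking
 * query that consumes DATA0 from the response. XE_GUC_ACTION_EXAMPLE_QUERY
 * and queue_id are hypothetical stand-ins for a real action code and payload.
 *
 *	u32 action[] = {
 *		XE_GUC_ACTION_EXAMPLE_QUERY,	// hypothetical action code
 *		queue_id,
 *	};
 *	int data = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *
 *	if (data < 0)
 *		return data;	// send failed or the GuC reported an error
 *	// on success, data holds DATA0 from the HXG Response
 */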
int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 *response_buffer)
{
	return guc_ct_send_recv(ct, action, len, response_buffer, true);
}

static u32 *msg_to_hxg(u32 *msg)
{
	return msg + GUC_CTB_MSG_MIN_LEN;
}

static u32 msg_len_to_hxg_len(u32 len)
{
	return len - GUC_CTB_MSG_MIN_LEN;
}
static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	lockdep_assert_held(&ct->lock);

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		g2h_release_space(ct, len);
	}

	return 0;
}
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_device *xe = gt_to_xe(gt);
	u32 *hxg = msg_to_hxg(msg);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	struct g2h_fence *g2h_fence;

	lockdep_assert_held(&ct->lock);

	/*
	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
	 * Those messages should never fail, so if we do get an error back it
	 * means we're likely doing an illegal operation and the GuC is
	 * rejecting it. We have no way to inform the code that submitted the
	 * H2G that the message was rejected, so we need to escalate the
	 * failure to trigger a reset.
	 */
	if (fence & CT_SEQNO_UNTRACKED) {
		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
				  fence,
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
		else
			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
				  type, fence);

		return -EPROTO;
	}

	g2h_fence = xa_erase(&ct->fence_lookup, fence);
	if (unlikely(!g2h_fence)) {
		/* Don't tear down channel, as send could've timed out */
		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
		return 0;
	}

	xe_assert(xe, fence == g2h_fence->seqno);

	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
		g2h_fence->fail = true;
		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		g2h_fence->retry = true;
		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
	} else if (g2h_fence->response_buffer) {
		g2h_fence->response_len = hxg_len;
		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
	} else {
		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
	}

	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);

	g2h_fence->done = true;

	wake_up_all(&ct->g2h_fence_wq);

	return 0;
}
static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_device *xe = ct_to_xe(ct);
	u32 *hxg = msg_to_hxg(msg);
	u32 origin, type;
	int ret;

	lockdep_assert_held(&ct->lock);

	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
		drm_err(&xe->drm,
			"G2H channel broken on read, origin=%d, reset required\n",
			origin);
		ct->ctbs.g2h.info.broken = true;
		return -EPROTO;
	}

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	switch (type) {
	case GUC_HXG_TYPE_EVENT:
		ret = parse_g2h_event(ct, msg, len);
		break;
	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
	case GUC_HXG_TYPE_RESPONSE_FAILURE:
	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
		ret = parse_g2h_response(ct, msg, len);
		break;
	default:
		drm_err(&xe->drm,
			"G2H channel broken on read, type=%d, reset required\n",
			type);
		ct->ctbs.g2h.info.broken = true;
		ret = -EOPNOTSUPP;
	}

	return ret;
}
static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_guc *guc = ct_to_guc(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action, adj_len;
	u32 *payload;
	int ret = 0;

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
		return 0;

	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;

	switch (action) {
	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
		ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
		ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
							      adj_len);
		break;
	case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
		/* Selftest only at the moment */
		break;
	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
		/* FIXME: Handle this */
		break;
	case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
		ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
								 adj_len);
		break;
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
							   adj_len);
		break;
	case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
		ret = xe_guc_access_counter_notify_handler(guc, payload,
							   adj_len);
		break;
	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
		break;
	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
		break;
	default:
		drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
	}

	if (ret)
		drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
			action, ret);

	return 0;
}
static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct guc_ctb *g2h = &ct->ctbs.g2h;
	u32 tail, head, len;
	u32 action;
	u32 *hxg;
	s32 avail;

	xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
	lockdep_assert_held(&ct->fast_lock);

	if (ct->state == XE_GUC_CT_STATE_DISABLED)
		return -ENODEV;

	if (ct->state == XE_GUC_CT_STATE_STOPPED)
		return -ECANCELED;

	if (g2h->info.broken)
		return -EPIPE;

	xe_assert(xe, xe_guc_ct_enabled(ct));

	/* Calculate DW available to read */
	tail = desc_read(xe, g2h, tail);
	avail = tail - g2h->info.head;
	if (unlikely(avail == 0))
		return 0;

	if (avail < 0)
		avail += g2h->info.size;

	/* Read header */
	xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
			   sizeof(u32));
	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
	if (len > avail) {
		drm_err(&xe->drm,
			"G2H channel broken on read, avail=%d, len=%d, reset required\n",
			avail, len);
		g2h->info.broken = true;
		return -EPROTO;
	}

	avail = len - 1;
	head = (g2h->info.head + 1) % g2h->info.size;

	/* Read G2H message */
	if (avail + head > g2h->info.size) {
		u32 avail_til_wrap = g2h->info.size - head;

		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail_til_wrap * sizeof(u32));
		xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
				   &g2h->cmds, 0,
				   (avail - avail_til_wrap) * sizeof(u32));
	} else {
		xe_map_memcpy_from(xe, msg + 1,
				   &g2h->cmds, sizeof(u32) * head,
				   avail * sizeof(u32));
	}

	hxg = msg_to_hxg(msg);
	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);

	if (fast_path) {
		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
			return 0;

		switch (action) {
		case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
			break;	/* Process these in fast-path */
		default:
			return 0;
		}
	}

	/* Update local / descriptor header */
	g2h->info.head = (head + avail) % g2h->info.size;
	desc_write(xe, g2h, head, g2h->info.head);

	trace_xe_guc_ctb_g2h(ct_to_gt(ct)->info.id, action, len,
			     g2h->info.head, tail);

	return len;
}
static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_guc *guc = ct_to_guc(ct);
	u32 hxg_len = msg_len_to_hxg_len(len);
	u32 *hxg = msg_to_hxg(msg);
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
	int ret = 0;

	switch (action) {
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		ret = xe_guc_pagefault_handler(guc, payload, adj_len);
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		__g2h_release_space(ct, len);
		ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
							   adj_len);
		break;
	default:
		drm_warn(&xe->drm, "NOT_POSSIBLE");
	}

	if (ret)
		drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
			action, ret);
}
/**
 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
 * @ct: GuC CT object
 *
 * Anything related to page faults is critical for performance; process these
 * critical G2H messages in the IRQ. This is safe as these handlers either
 * just wake up waiters or queue another worker.
 */
void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	bool ongoing;
	int len;

	ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	spin_lock(&ct->fast_lock);
	do {
		len = g2h_read(ct, ct->fast_msg, true);
		if (len > 0)
			g2h_fast_path(ct, ct->fast_msg, len);
	} while (len > 0);
	spin_unlock(&ct->fast_lock);

	if (ongoing)
		xe_device_mem_access_put(xe);
}
/* Returns less than zero on error, 0 on done, 1 on more available */
static int dequeue_one_g2h(struct xe_guc_ct *ct)
{
	int len, ret;

	lockdep_assert_held(&ct->lock);

	spin_lock_irq(&ct->fast_lock);
	len = g2h_read(ct, ct->msg, false);
	spin_unlock_irq(&ct->fast_lock);
	if (len <= 0)
		return len;

	ret = parse_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	ret = process_g2h_msg(ct, ct->msg, len);
	if (unlikely(ret < 0))
		return ret;

	return 1;
}
static void g2h_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
	bool ongoing;
	int ret;

	/*
	 * Normal users must always hold mem_access.ref around CT calls. However
	 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
	 * at this stage we can't rely on mem_access.ref and even the
	 * callback_task will be different from current. For such cases we just
	 * need to ensure we always process the responses from any blocking
	 * ct_send requests or where we otherwise expect some response when
	 * initiated from those callbacks (which will need to wait for the below
	 * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
	 * the device has suspended to the point that the CT communication has
	 * already been disabled.
	 *
	 * If we are inside the runtime pm callback, we can be the only task
	 * still issuing CT requests (since that requires having the
	 * mem_access.ref). It seems like it might in theory be possible to
	 * receive unsolicited events from the GuC just as we are
	 * suspending-resuming, but those will currently anyway be lost when
	 * eventually exiting from suspend, hence no need to wake up the device
	 * here. If we ever need something stronger than get_if_ongoing() then
	 * we need to be careful with blocking the pm callbacks from getting CT
	 * responses, if the worker here is blocked on those callbacks
	 * completing, creating a deadlock.
	 */
	ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
		return;

	do {
		mutex_lock(&ct->lock);
		ret = dequeue_one_g2h(ct);
		mutex_unlock(&ct->lock);

		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
			struct drm_device *drm = &ct_to_xe(ct)->drm;
			struct drm_printer p = drm_info_printer(drm->dev);

			xe_guc_ct_print(ct, &p, false);
			kick_reset(ct);
		}
	} while (ret == 1);

	if (ongoing)
		xe_device_mem_access_put(ct_to_xe(ct));
}
static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
				     struct guc_ctb_snapshot *snapshot,
				     bool atomic)
{
	u32 head, tail;

	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
			   sizeof(struct guc_ct_buffer_desc));
	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));

	snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
				       atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (!snapshot->cmds) {
		drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
		return;
	}

	head = snapshot->desc.head;
	tail = snapshot->desc.tail;

	if (head != tail) {
		struct iosys_map map =
			IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));

		while (head != tail) {
			snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32);
			++head;
			if (head == ctb->info.size) {
				head = 0;
				map = ctb->cmds;
			} else {
				iosys_map_incr(&map, sizeof(u32));
			}
		}
	}
}
static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
				   struct drm_printer *p)
{
	u32 head, tail;

	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
	drm_printf(p, "\thead: %d\n", snapshot->info.head);
	drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
	drm_printf(p, "\tspace: %d\n", snapshot->info.space);
	drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);

	if (!snapshot->cmds)
		return;

	head = snapshot->desc.head;
	tail = snapshot->desc.tail;

	while (head != tail) {
		drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
			   snapshot->cmds[head]);
		++head;
		if (head == snapshot->info.size)
			head = 0;
	}
}

static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
{
	kfree(snapshot->cmds);
}
/**
 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
 * @ct: GuC CT object.
 * @atomic: Boolean to indicate if this is called from atomic context like
 * reset or CTB handler or from some regular path like debugfs.
 *
 * This can be printed out in a later stage like during dev_coredump
 * analysis.
 *
 * Returns: a GuC CT snapshot object that must be freed by the caller
 * by using `xe_guc_ct_snapshot_free`.
 */
struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
						      bool atomic)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = kzalloc(sizeof(*snapshot),
			   atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (!snapshot) {
		drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
		return NULL;
	}

	if (xe_guc_ct_enabled(ct)) {
		snapshot->ct_enabled = true;
		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
					 &snapshot->h2g, atomic);
		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
					 &snapshot->g2h, atomic);
	}

	return snapshot;
}
/**
 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
 * @snapshot: GuC CT snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC CT snapshot object.
 */
void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
			      struct drm_printer *p)
{
	if (!snapshot)
		return;

	if (snapshot->ct_enabled) {
		drm_puts(p, "H2G CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->h2g, p);

		drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
		guc_ctb_snapshot_print(&snapshot->g2h, p);

		drm_printf(p, "\tg2h outstanding: %d\n",
			   snapshot->g2h_outstanding);
	} else {
		drm_puts(p, "CT disabled\n");
	}
}
/**
 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: GuC CT snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
{
	if (!snapshot)
		return;

	guc_ctb_snapshot_free(&snapshot->h2g);
	guc_ctb_snapshot_free(&snapshot->g2h);
	kfree(snapshot);
}

/**
 * xe_guc_ct_print - GuC CT Print.
 * @ct: GuC CT object.
 * @p: drm_printer where it will be printed out.
 * @atomic: Boolean to indicate if this is called from atomic context like
 * reset or CTB handler or from some regular path like debugfs.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
{
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
	xe_guc_ct_snapshot_print(snapshot, p);
	xe_guc_ct_snapshot_free(snapshot);
}