// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <drm/drm_managed.h>
#include "xe_device.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc_submit.h"
/* Used when a CT send wants to block and / or receive data */
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
g2h_fence->response_buffer = response_buffer;
g2h_fence->response_len = 0;
g2h_fence->fail = false;
g2h_fence->retry = false;
g2h_fence->done = false;
g2h_fence->seqno = ~0x0;
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
return g2h_fence->seqno == ~0x0;
static struct xe_guc *
ct_to_guc(struct xe_guc_ct *ct)
return container_of(ct, struct xe_guc, ct);
static struct xe_gt *
ct_to_gt(struct xe_guc_ct *ct)
return container_of(ct, struct xe_gt, uc.guc.ct);
static struct xe_device *
ct_to_xe(struct xe_guc_ct *ct)
return gt_to_xe(ct_to_gt(ct));
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 * +--------+----------------------------+------+
 * | offset | contents                   | size |
 * +========+============================+======+
 * | 0x0000 | H2G CTB Descriptor (send)  |      |
 * +--------+----------------------------+  4K  |
 * | 0x0800 | G2H CTB Descriptor (g2h)   |      |
 * +--------+----------------------------+------+
 * | 0x1000 | H2G CT Buffer (send)       | n*4K |
 * |        |                            |      |
 * +--------+----------------------------+------+
 * | 0x1000 | G2H CT Buffer (g2h)        | m*4K |
 * |  + n*4K|                            |      |
 * +--------+----------------------------+------+
 *
 * Size of each ``CT Buffer`` must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
 * is enough space to avoid backpressure on the driver. We increase the size
 * of the receive buffer (relative to the send) to ensure a G2H response
 * CTB has a landing spot.
#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)
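/*
 * Illustrative sizing with the defines above (assuming 4K pages): the blob is
 * 2 * 2K of descriptors + 4K of H2G buffer + 16K of G2H buffer = 24K total,
 * with 4K of the G2H buffer (G2H_ROOM_BUFFER_SIZE) held back so responses to
 * blocking H2Gs always have somewhere to land.
 */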
static size_t guc_ct_size(void)
return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
static void guc_ct_fini(struct drm_device *drm, void *arg)
struct xe_guc_ct *ct = arg;
xa_destroy(&ct->fence_lookup);
xe_bo_unpin_map_no_vm(ct->bo);
static void g2h_worker_func(struct work_struct *w);
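/*
 * Prime lockdep with the ct->lock vs memory-reclaim ordering up front, so
 * that an allocation made while holding ct->lock is flagged by lockdep
 * immediately instead of only when reclaim actually recurses into the CT
 * paths.
 */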
static void primelockdep(struct xe_guc_ct *ct)
if (!IS_ENABLED(CONFIG_LOCKDEP))
fs_reclaim_acquire(GFP_KERNEL);
might_lock(&ct->lock);
fs_reclaim_release(GFP_KERNEL);
int xe_guc_ct_init(struct xe_guc_ct *ct)
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
XE_BUG_ON(guc_ct_size() % PAGE_SIZE);
mutex_init(&ct->lock);
spin_lock_init(&ct->fast_lock);
xa_init(&ct->fence_lookup);
ct->fence_context = dma_fence_context_alloc(1);
INIT_WORK(&ct->g2h_worker, g2h_worker_func);
init_waitqueue_head(&ct->wq);
init_waitqueue_head(&ct->g2h_fence_wq);
bo = xe_bo_create_pin_map(xe, gt, NULL, guc_ct_size(),
XE_BO_CREATE_VRAM_IF_DGFX(gt) |
XE_BO_CREATE_GGTT_BIT);
err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
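/*
 * The CTB descriptors live in the GuC-shared BO, so all descriptor accesses
 * go through the iosys_map helpers rather than plain pointer dereferences.
 */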
#define desc_read(xe_, guc_ctb__, field_) \
	xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
			struct guc_ct_buffer_desc, field_)
#define desc_write(xe_, guc_ctb__, field_, val_) \
	xe_map_wr_field(xe_, &guc_ctb__->desc, 0, \
			struct guc_ct_buffer_desc, field_, val_)
static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
				struct iosys_map *map)
h2g->size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
h2g->space = CIRC_SPACE(h2g->tail, h2g->head, h2g->size) -
xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
				struct iosys_map *map)
g2h->size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
g2h->resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
g2h->space = CIRC_SPACE(g2h->tail, g2h->head, g2h->size) -
g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
				  CTB_H2G_BUFFER_SIZE);
static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
struct xe_guc *guc = ct_to_guc(ct);
u32 desc_addr, ctb_addr, size;
desc_addr = xe_bo_ggtt_addr(ct->bo);
ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
size = ct->ctbs.h2g.size * sizeof(u32);
err = xe_guc_self_cfg64(guc,
			GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
err = xe_guc_self_cfg64(guc,
			GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
return xe_guc_self_cfg32(guc,
			 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
struct xe_guc *guc = ct_to_guc(ct);
u32 desc_addr, ctb_addr, size;
desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
size = ct->ctbs.g2h.size * sizeof(u32);
err = xe_guc_self_cfg64(guc,
			GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
err = xe_guc_self_cfg64(guc,
			GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
return xe_guc_self_cfg32(guc,
			 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
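/*
 * Enable / disable the CTBs on the GuC side via the HOST2GUC_CONTROL_CTB
 * action. This is sent over MMIO because the CT channel itself is either not
 * usable yet or is being torn down.
 */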
static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
	FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
	FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
	FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
		   GUC_ACTION_HOST2GUC_CONTROL_CTB),
	FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
		   enable ? GUC_CTB_CONTROL_ENABLE :
		   GUC_CTB_CONTROL_DISABLE),
int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
return ret > 0 ? -EPROTO : ret;
int xe_guc_ct_enable(struct xe_guc_ct *ct)
struct xe_device *xe = ct_to_xe(ct);
XE_BUG_ON(ct->enabled);
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
err = guc_ct_ctb_h2g_register(ct);
err = guc_ct_ctb_g2h_register(ct);
err = guc_ct_control_toggle(ct, true);
mutex_lock(&ct->lock);
ct->g2h_outstanding = 0;
mutex_unlock(&ct->lock);
wake_up_all(&ct->wq);
drm_dbg(&xe->drm, "GuC CT communication channel enabled\n");
drm_err(&xe->drm, "Failed to enable CT (%d)", err);
void xe_guc_ct_disable(struct xe_guc_ct *ct)
mutex_lock(&ct->lock);
mutex_unlock(&ct->lock);
xa_destroy(&ct->fence_lookup);
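/*
 * H2G space is tracked locally in dwords; the cached head is only refreshed
 * from the descriptor (where the GuC advances it as commands are consumed)
 * when the cached value suggests there is not enough room for the command.
 */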
static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
struct guc_ctb *h2g = &ct->ctbs.h2g;
lockdep_assert_held(&ct->lock);
if (cmd_len > h2g->space) {
	h2g->head = desc_read(ct_to_xe(ct), h2g, head);
	h2g->space = CIRC_SPACE(h2g->tail, h2g->head, h2g->size) -
	if (cmd_len > h2g->space)
static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
lockdep_assert_held(&ct->lock);
return ct->ctbs.g2h.space > g2h_len;
static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
lockdep_assert_held(&ct->lock);
if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
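/*
 * Credit accounting: sending an H2G consumes H2G space immediately, and when
 * the command will generate G2H messages the expected G2H space is reserved
 * up front (under fast_lock) and released again as those G2H are processed.
 */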
static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
lockdep_assert_held(&ct->lock);
ct->ctbs.h2g.space -= cmd_len;
static void g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
XE_BUG_ON(g2h_len > ct->ctbs.g2h.space);
spin_lock_irq(&ct->fast_lock);
ct->ctbs.g2h.space -= g2h_len;
ct->g2h_outstanding += num_g2h;
spin_unlock_irq(&ct->fast_lock);
static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
lockdep_assert_held(&ct->fast_lock);
XE_WARN_ON(ct->ctbs.g2h.space + g2h_len >
	   ct->ctbs.g2h.size - ct->ctbs.g2h.resv_space);
ct->ctbs.g2h.space += g2h_len;
--ct->g2h_outstanding;
static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
spin_lock_irq(&ct->fast_lock);
__g2h_release_space(ct, g2h_len);
spin_unlock_irq(&ct->fast_lock);
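/*
 * Format a single H2G command: a CT header carrying the fence, an HXG header
 * carrying the action, then the payload. The command is written into the H2G
 * ring (zero-padding to the end of the ring if it would wrap), made visible
 * with a write barrier, and only then is the new tail published in the
 * descriptor for the GuC to pick up.
 */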
static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
		     u32 ct_fence_value, bool want_response)
struct xe_device *xe = ct_to_xe(ct);
struct guc_ctb *h2g = &ct->ctbs.h2g;
u32 cmd[GUC_CTB_MSG_MAX_LEN / sizeof(u32)];
u32 cmd_len = len + GUC_CTB_HDR_LEN;
u32 tail = h2g->tail;
struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
lockdep_assert_held(&ct->lock);
XE_BUG_ON(len * sizeof(u32) > GUC_CTB_MSG_MAX_LEN);
XE_BUG_ON(tail > h2g->size);
/* Command will wrap, zero fill (NOPs), return and check credits again */
if (tail + cmd_len > h2g->size) {
	xe_map_memset(xe, &map, 0, 0, (h2g->size - tail) * sizeof(u32));
	h2g_reserve_space(ct, (h2g->size - tail));
	desc_write(xe, h2g, tail, h2g->tail);
 * dw0: CT header (including fence)
 * dw1: HXG header (including action code)
cmd[cmd_idx++] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
		 FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
	   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
	   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
for (i = 1; i < len; ++i)
	cmd[cmd_idx++] = action[i];
/* Write H2G, ensuring it is visible before the descriptor update */
xe_map_memcpy_to(xe, &map, 0, cmd, cmd_len * sizeof(u32));
xe_device_wmb(ct_to_xe(ct));
/* Update local copies */
h2g->tail = (tail + cmd_len) % h2g->size;
h2g_reserve_space(ct, cmd_len);
/* Update descriptor */
desc_write(xe, h2g, tail, h2g->tail);
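/*
 * Write one H2G if there is room. A blocking send (g2h_fence) allocates a
 * 16-bit seqno and parks the fence in the fence_lookup xarray so the response
 * can find it; callers pass g2h_len / num_g2h only for fire-and-forget sends,
 * while a blocking send reserves a full HXG message of G2H space internally.
 */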
static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 g2h_len, u32 num_g2h,
				struct g2h_fence *g2h_fence)
XE_BUG_ON(g2h_len && g2h_fence);
XE_BUG_ON(num_g2h && g2h_fence);
XE_BUG_ON(g2h_len && !num_g2h);
XE_BUG_ON(!g2h_len && num_g2h);
lockdep_assert_held(&ct->lock);
if (unlikely(ct->ctbs.h2g.broken)) {
if (unlikely(!ct->enabled)) {
g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
if (g2h_fence_needs_alloc(g2h_fence)) {
	g2h_fence->seqno = (ct->fence_seqno++ & 0xffff);
	ptr = xa_store(&ct->fence_lookup,
		       g2h_fence, GFP_ATOMIC);
xe_device_mem_access_get(ct_to_xe(ct));
ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
ret = h2g_write(ct, action, len, g2h_fence ? g2h_fence->seqno : 0,
g2h_reserve_space(ct, g2h_len, num_g2h);
xe_guc_notify(ct_to_guc(ct));
xe_device_mem_access_put(ct_to_xe(ct));
static void kick_reset(struct xe_guc_ct *ct)
xe_gt_reset_async(ct_to_gt(ct));
static int dequeue_one_g2h(struct xe_guc_ct *ct);
static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			      u32 g2h_len, u32 num_g2h,
			      struct g2h_fence *g2h_fence)
struct drm_device *drm = &ct_to_xe(ct)->drm;
struct drm_printer p = drm_info_printer(drm->dev);
unsigned int sleep_period_ms = 1;
XE_BUG_ON(g2h_len && g2h_fence);
lockdep_assert_held(&ct->lock);
ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
 * We wait to try to restore credits for about 1 second before bailing.
 * In the case of H2G credits we have no choice but to wait for the
 * GuC to consume H2Gs in the channel, so we use a wait / sleep loop. In
 * the case of G2H credits we process any G2H in the channel, hopefully
 * freeing credits as we consume the G2H messages.
if (unlikely(ret == -EBUSY &&
	     !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
	struct guc_ctb *h2g = &ct->ctbs.h2g;
	if (sleep_period_ms == 1024)
	trace_xe_guc_ct_h2g_flow_control(h2g->head, h2g->tail,
					 h2g->size, h2g->space,
					 len + GUC_CTB_HDR_LEN);
	msleep(sleep_period_ms);
	sleep_period_ms <<= 1;
} else if (unlikely(ret == -EBUSY)) {
	struct xe_device *xe = ct_to_xe(ct);
	struct guc_ctb *g2h = &ct->ctbs.g2h;
	trace_xe_guc_ct_g2h_flow_control(g2h->head,
					 desc_read(xe, g2h, tail),
					 g2h->size, g2h->space,
					 GUC_CTB_HXG_MSG_MAX_LEN :
#define g2h_avail(ct) \
	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.head)
	if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
	if (dequeue_one_g2h(ct) < 0)
drm_err(drm, "No forward progress on H2G, reset required");
xe_guc_ct_print(ct, &p);
ct->ctbs.h2g.broken = true;
static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
XE_BUG_ON(g2h_len && g2h_fence);
mutex_lock(&ct->lock);
ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
mutex_unlock(&ct->lock);
int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		   u32 g2h_len, u32 num_g2h)
ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			  u32 g2h_len, u32 num_g2h)
ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
lockdep_assert_held(&ct->lock);
ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
 * Check if a GT reset is in progress or will occur, and whether the GT reset
 * brought the CT back up. Randomly picking 5 seconds as an upper limit for a
 * GT reset to complete.
static bool retry_failure(struct xe_guc_ct *ct, int ret)
if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
#define ct_alive(ct) \
	(ct->enabled && !ct->ctbs.h2g.broken && !ct->ctbs.g2h.broken)
if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			    u32 *response_buffer, bool no_fail)
struct xe_device *xe = ct_to_xe(ct);
struct g2h_fence g2h_fence;
 * We use a fence to implement blocking sends / receiving response data.
 * The seqno of the fence is sent in the H2G, returned in the G2H, and
 * an xarray is used as storage media with the seqno being the key.
 * Fields in the fence hold success, failure, retry status and the
 * response data. Safe to allocate on the stack as the xarray is the
 * only reference and it cannot be present after this function exits.
g2h_fence_init(&g2h_fence, response_buffer);
ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
if (unlikely(ret == -ENOMEM)) {
/* Retry allocation w/ GFP_KERNEL */
ptr = xa_store(&ct->fence_lookup,
	       &g2h_fence, GFP_KERNEL);
goto retry_same_fence;
} else if (unlikely(ret)) {
if (no_fail && retry_failure(ct, ret))
	goto retry_same_fence;
if (!g2h_fence_needs_alloc(&g2h_fence))
	xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
drm_err(&xe->drm, "Timed out waiting for G2H, fence %u, action 0x%04x",
	g2h_fence.seqno, action[0]);
xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
if (g2h_fence.retry) {
	drm_warn(&xe->drm, "Send retry, action 0x%04x, reason %d",
		 action[0], g2h_fence.reason);
if (g2h_fence.fail) {
	drm_err(&xe->drm, "Send failed, action 0x%04x, error %d, hint %d",
		action[0], g2h_fence.error, g2h_fence.hint);
return ret > 0 ? 0 : ret;
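/*
 * Hypothetical usage sketch (the action contents and buffer size below are
 * illustrative placeholders, not a real GuC message):
 *
 *	u32 action[] = { SOME_GUC_ACTION, arg0, arg1 };
 *	u32 response[16];
 *	int err = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), response);
 *
 * A zero return means the GuC replied with RESPONSE_SUCCESS; any response
 * payload is copied into the caller's buffer.
 */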
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			u32 *response_buffer)
return guc_ct_send_recv(ct, action, len, response_buffer, false);
int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 *response_buffer)
return guc_ct_send_recv(ct, action, len, response_buffer, true);
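/*
 * G2H events whose credits were reserved when the originating H2G was sent
 * return those credits here as soon as the event is parsed.
 */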
static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
lockdep_assert_held(&ct->lock);
case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
	g2h_release_space(ct, len);
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
struct xe_device *xe = ct_to_xe(ct);
u32 response_len = len - GUC_CTB_MSG_MIN_LEN;
u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]);
struct g2h_fence *g2h_fence;
lockdep_assert_held(&ct->lock);
g2h_fence = xa_erase(&ct->fence_lookup, fence);
if (unlikely(!g2h_fence)) {
	/* Don't tear down channel, as send could've timed out */
	drm_warn(&xe->drm, "G2H fence (%u) not found!\n", fence);
	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
XE_WARN_ON(fence != g2h_fence->seqno);
if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
	g2h_fence->fail = true;
	FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[1]);
	FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[1]);
} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
	g2h_fence->retry = true;
	FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, msg[1]);
} else if (g2h_fence->response_buffer) {
	g2h_fence->response_len = response_len;
	memcpy(g2h_fence->response_buffer, msg + GUC_CTB_MSG_MIN_LEN,
	       response_len * sizeof(u32));
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
g2h_fence->done = true;
wake_up_all(&ct->g2h_fence_wq);
static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
struct xe_device *xe = ct_to_xe(ct);
u32 header, hxg, origin, type;
lockdep_assert_held(&ct->lock);
origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg);
if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
	"G2H channel broken on read, origin=%d, reset required\n",
	ct->ctbs.g2h.broken = true;
type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg);
case GUC_HXG_TYPE_EVENT:
	ret = parse_g2h_event(ct, msg, len);
case GUC_HXG_TYPE_RESPONSE_SUCCESS:
case GUC_HXG_TYPE_RESPONSE_FAILURE:
case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
	ret = parse_g2h_response(ct, msg, len);
	"G2H channel broken on read, type=%d, reset required\n",
	ct->ctbs.g2h.broken = true;
static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
struct xe_device *xe = ct_to_xe(ct);
struct xe_guc *guc = ct_to_guc(ct);
u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN;
u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN;
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT)
case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
	ret = xe_guc_sched_done_handler(guc, payload, adj_len);
case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
	ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
	ret = xe_guc_engine_reset_handler(guc, payload, adj_len);
case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
	ret = xe_guc_engine_reset_failure_handler(guc, payload,
case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
	/* Selftest only at the moment */
case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
	/* FIXME: Handle this */
case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
	ret = xe_guc_engine_memory_cat_error_handler(guc, payload,
case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
	ret = xe_guc_pagefault_handler(guc, payload, adj_len);
case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
	ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
	ret = xe_guc_access_counter_notify_handler(guc, payload,
	drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
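/*
 * Pull a single G2H message out of the ring into the supplied buffer,
 * handling a wrap of the ring in the middle of a message. In the fast (IRQ)
 * path only page-fault critical actions are consumed; anything else is left
 * in the ring for the ordinary G2H worker.
 */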
static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
struct xe_device *xe = ct_to_xe(ct);
struct guc_ctb *g2h = &ct->ctbs.g2h;
lockdep_assert_held(&ct->fast_lock);
/* Calculate DW available to read */
tail = desc_read(xe, g2h, tail);
avail = tail - g2h->head;
if (unlikely(avail == 0))
xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->head, sizeof(u32));
len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
	"G2H channel broken on read, avail=%d, len=%d, reset required\n",
head = (g2h->head + 1) % g2h->size;
/* Read G2H message */
if (avail + head > g2h->size) {
	u32 avail_til_wrap = g2h->size - head;
	xe_map_memcpy_from(xe, msg + 1,
			   &g2h->cmds, sizeof(u32) * head,
			   avail_til_wrap * sizeof(u32));
	xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
			   (avail - avail_til_wrap) * sizeof(u32));
	xe_map_memcpy_from(xe, msg + 1,
			   &g2h->cmds, sizeof(u32) * head,
			   avail * sizeof(u32));
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT)
switch (FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1])) {
 * FIXME: We really should process
 * XE_GUC_ACTION_TLB_INVALIDATION_DONE here in the fast-path as
 * these are critical for page fault performance. We currently can't,
 * due to the TLB invalidation done algorithm expecting the seqnos to
 * be returned in order. With some small changes to the algorithm and
 * locking we should be able to support out-of-order seqnos.
case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
	break;	/* Process these in fast-path */
/* Update local / descriptor header */
g2h->head = (head + avail) % g2h->size;
desc_write(xe, g2h, head, g2h->head);
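/*
 * Handlers that are safe to run from the IRQ fast path: page faults and TLB
 * invalidation completions only wake waiters or queue further work, and the
 * TLB invalidation case also returns its G2H credit inline under fast_lock.
 */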
static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
struct xe_device *xe = ct_to_xe(ct);
struct xe_guc *guc = ct_to_guc(ct);
u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN;
u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN;
case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
	ret = xe_guc_pagefault_handler(guc, payload, adj_len);
case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
	__g2h_release_space(ct, len);
	ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
	XE_WARN_ON("NOT_POSSIBLE");
	drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
 * @ct: GuC CT object
 * Anything related to page faults is critical for performance, so process
 * these G2H messages directly in the IRQ. This is safe because the handlers
 * either just wake up waiters or queue another worker.
void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
struct xe_device *xe = ct_to_xe(ct);
if (!xe_device_in_fault_mode(xe) || !xe_device_mem_access_ongoing(xe))
spin_lock(&ct->fast_lock);
len = g2h_read(ct, ct->fast_msg, true);
g2h_fast_path(ct, ct->fast_msg, len);
spin_unlock(&ct->fast_lock);
/* Returns less than zero on error, 0 on done, 1 on more available */
static int dequeue_one_g2h(struct xe_guc_ct *ct)
lockdep_assert_held(&ct->lock);
spin_lock_irq(&ct->fast_lock);
len = g2h_read(ct, ct->msg, false);
spin_unlock_irq(&ct->fast_lock);
ret = parse_g2h_msg(ct, ct->msg, len);
if (unlikely(ret < 0))
ret = process_g2h_msg(ct, ct->msg, len);
if (unlikely(ret < 0))
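/*
 * Ordinary (non fast-path) G2H processing: drain one message at a time under
 * ct->lock while holding a mem_access reference, dumping the CT state when a
 * message fails to parse or process with a protocol error.
 */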
static void g2h_worker_func(struct work_struct *w)
struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
xe_device_mem_access_get(ct_to_xe(ct));
mutex_lock(&ct->lock);
ret = dequeue_one_g2h(ct);
mutex_unlock(&ct->lock);
if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
	struct drm_device *drm = &ct_to_xe(ct)->drm;
	struct drm_printer p = drm_info_printer(drm->dev);
	xe_guc_ct_print(ct, &p);
xe_device_mem_access_put(ct_to_xe(ct));
static void guc_ct_ctb_print(struct xe_device *xe, struct guc_ctb *ctb,
			     struct drm_printer *p)
drm_printf(p, "\tsize: %d\n", ctb->size);
drm_printf(p, "\tresv_space: %d\n", ctb->resv_space);
drm_printf(p, "\thead: %d\n", ctb->head);
drm_printf(p, "\ttail: %d\n", ctb->tail);
drm_printf(p, "\tspace: %d\n", ctb->space);
drm_printf(p, "\tbroken: %d\n", ctb->broken);
head = desc_read(xe, ctb, head);
tail = desc_read(xe, ctb, tail);
drm_printf(p, "\thead (memory): %d\n", head);
drm_printf(p, "\ttail (memory): %d\n", tail);
drm_printf(p, "\tstatus (memory): 0x%x\n", desc_read(xe, ctb, status));
struct iosys_map map =
	IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));
while (head != tail) {
	drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
		   xe_map_rd(xe, &map, 0, u32));
	if (head == ctb->size) {
	iosys_map_incr(&map, sizeof(u32));
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p)
drm_puts(p, "\nH2G CTB (all sizes in DW):\n");
guc_ct_ctb_print(ct_to_xe(ct), &ct->ctbs.h2g, p);
drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
guc_ct_ctb_print(ct_to_xe(ct), &ct->ctbs.g2h, p);
drm_printf(p, "\tg2h outstanding: %d\n", ct->g2h_outstanding);
drm_puts(p, "\nCT disabled\n");
#ifdef XE_GUC_CT_SELFTEST
 * Disable G2H processing in the IRQ handler to force xe_guc_ct_send to enter
 * flow control once enough messages have been sent (8k sends is enough).
 * Verify forward progress and that the credits return to their expected
 * values on exit.
void xe_guc_ct_selftest(struct xe_guc_ct *ct, struct drm_printer *p)
struct guc_ctb *g2h = &ct->ctbs.g2h;
u32 action[] = { XE_GUC_ACTION_SCHED_ENGINE_MODE_SET, 0, 0, 1, };
u32 bad_action[] = { XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET, 0, 0, };
ct->suppress_irq_handler = true;
drm_puts(p, "Starting GuC CT selftest\n");
for (i = 0; i < 8192; ++i) {
	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 4, 1);
	drm_printf(p, "Aborted pass %d, ret %d\n", i, ret);
	xe_guc_ct_print(ct, p);
ct->suppress_irq_handler = false;
xe_guc_ct_irq_handler(ct);
    CIRC_SPACE(0, 0, g2h->size) - g2h->resv_space) {
	drm_printf(p, "Mismatch on space %d, %d\n",
		   CIRC_SPACE(0, 0, g2h->size) -
if (ct->g2h_outstanding) {
	drm_printf(p, "Outstanding G2H, %d\n",
		   ct->g2h_outstanding);
/* Check failure path for blocking CTs too */
xe_guc_ct_send_block(ct, bad_action, ARRAY_SIZE(bad_action));
    CIRC_SPACE(0, 0, g2h->size) - g2h->resv_space) {
	drm_printf(p, "Mismatch on space %d, %d\n",
		   CIRC_SPACE(0, 0, g2h->size) -
if (ct->g2h_outstanding) {
	drm_printf(p, "Outstanding G2H, %d\n",
		   ct->g2h_outstanding);
drm_printf(p, "GuC CT selftest done - %s\n", ret ? "FAIL" : "PASS");