xhci: dbc: decouple endpoint allocation from initialization
author Mathias Nyman <mathias.nyman@linux.intel.com>
Tue, 2 Sep 2025 10:53:04 +0000 (13:53 +0300)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 6 Sep 2025 13:29:18 +0000 (15:29 +0200)
Decouple allocation of the endpoint ring buffer from initialization
of the buffer, and initialization of the endpoint context parts from
the rest of the contexts.

This allows the driver to clear and reinitialize the endpoint rings
after a disconnect without reallocating everything.

This is a prerequisite for the next patch that prevents the transfer
ring from filling up with cancelled (no-op) TRBs if a debug cable is
reconnected several times without transferring anything.
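
As a rough sketch (not part of this patch), a reconnect path could now
reuse the already-allocated rings by reinitializing them in place with
the two helpers this patch introduces; the caller below is hypothetical
and only illustrates the intended split:

    /* Hypothetical caller: reuse the allocated DbC rings on reconnect
     * instead of freeing and reallocating them.
     */
    static void dbc_reinit_rings(struct xhci_dbc *dbc)
    {
            /* Wipe the TRBs and restore the link TRB in place */
            xhci_dbc_ring_init(dbc->ring_in);
            xhci_dbc_ring_init(dbc->ring_out);

            /* Repopulate endpoint contexts with the reset enqueue
             * pointers and cycle state.
             */
            xhci_dbc_init_ep_contexts(dbc);
    }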

Cc: stable@vger.kernel.org
Fixes: dfba2174dc42 ("usb: xhci: Add DbC support in xHCI driver")
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20250902105306.877476-2-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/usb/host/xhci-dbgcap.c

index 06a2edb9e86ef754a7bfc76f33da0e5ad504d6ae..d0faff233e3e33a4efa078b1e92c3f73a39f3636 100644
@@ -101,13 +101,34 @@ static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
        return string_length;
 }
 
+static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
+{
+       struct xhci_ep_ctx      *ep_ctx;
+       unsigned int            max_burst;
+       dma_addr_t              deq;
+
+       max_burst               = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
+
+       /* Populate bulk out endpoint context: */
+       ep_ctx                  = dbc_bulkout_ctx(dbc);
+       deq                     = dbc_bulkout_enq(dbc);
+       ep_ctx->ep_info         = 0;
+       ep_ctx->ep_info2        = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
+       ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_out->cycle_state);
+
+       /* Populate bulk in endpoint context: */
+       ep_ctx                  = dbc_bulkin_ctx(dbc);
+       deq                     = dbc_bulkin_enq(dbc);
+       ep_ctx->ep_info         = 0;
+       ep_ctx->ep_info2        = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
+       ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+}
+
 static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
 {
        struct dbc_info_context *info;
-       struct xhci_ep_ctx      *ep_ctx;
        u32                     dev_info;
-       dma_addr_t              deq, dma;
-       unsigned int            max_burst;
+       dma_addr_t              dma;
 
        if (!dbc)
                return;
@@ -121,20 +142,8 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
        info->serial            = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
        info->length            = cpu_to_le32(string_length);
 
-       /* Populate bulk out endpoint context: */
-       ep_ctx                  = dbc_bulkout_ctx(dbc);
-       max_burst               = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
-       deq                     = dbc_bulkout_enq(dbc);
-       ep_ctx->ep_info         = 0;
-       ep_ctx->ep_info2        = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
-       ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_out->cycle_state);
-
-       /* Populate bulk in endpoint context: */
-       ep_ctx                  = dbc_bulkin_ctx(dbc);
-       deq                     = dbc_bulkin_enq(dbc);
-       ep_ctx->ep_info         = 0;
-       ep_ctx->ep_info2        = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
-       ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+       /* Populate bulk in and out endpoint contexts: */
+       xhci_dbc_init_ep_contexts(dbc);
 
        /* Set DbC context and info registers: */
        lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
@@ -436,6 +445,23 @@ dbc_alloc_ctx(struct device *dev, gfp_t flags)
        return ctx;
 }
 
+static void xhci_dbc_ring_init(struct xhci_ring *ring)
+{
+       struct xhci_segment *seg = ring->first_seg;
+
+       /* clear all trbs on ring in case of old ring */
+       memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
+
+       /* Only event ring does not use link TRB */
+       if (ring->type != TYPE_EVENT) {
+               union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+
+               trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
+               trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+       }
+       xhci_initialize_ring_info(ring);
+}
+
 static struct xhci_ring *
 xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
 {
@@ -464,15 +490,10 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
 
        seg->dma = dma;
 
-       /* Only event ring does not use link TRB */
-       if (type != TYPE_EVENT) {
-               union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
-
-               trb->link.segment_ptr = cpu_to_le64(dma);
-               trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
-       }
        INIT_LIST_HEAD(&ring->td_list);
-       xhci_initialize_ring_info(ring);
+
+       xhci_dbc_ring_init(ring);
+
        return ring;
 dma_fail:
        kfree(seg);