// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],                  \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

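/*
 * Illustrative only: for a PCIe tunnel from route 0 port 3 to route
 * 0x301 port 1, tb_tunnel_dbg(tunnel, "discovered\n") would log
 * roughly the following (the exact prefix depends on tb_dbg()):
 *
 *   0:3 <-> 301:1 (PCI): discovered
 */
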
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_initial_credits(const struct tb_switch *sw)
{
	/* If the path is complete sw is not NULL */
	if (sw) {
		/* More credits for faster link */
		switch (sw->link_speed * sw->link_width) {
		case 40:
			return 32;
		case 20:
			return 24;
		}
	}

	return 16;
}

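/*
 * Worked example (with the values above): a 20 Gb/s link with two
 * bonded lanes gives link_speed * link_width = 40, so hop 1 of the
 * path starts with 32 initial credits; a single 10 Gb/s lane matches
 * no case and falls back to the default of 16.
 */
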
static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	return tunnel;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}

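/*
 * Worked example: HBR2 x4 is nominally 5400 * 4 = 21600 Mb/s on the
 * main link, but with the 8b/10b encoding removed the tunnel only
 * needs 21600 * 8 / 10 = 17280 Mb/s, matching the dp_bw[] table below.
 */
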
static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

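/*
 * For illustration (numbers hypothetical): with max_bw = 9000 Mb/s and
 * both adapters capable of HBR2 x4, the first table entry that fits is
 * { 2700, 4 } at 8640 Mb/s, so the tunnel is reduced to HBR x4.
 */
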
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

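/*
 * For example: if the DP IN adapter advertises HBR3 x4 but the DP OUT
 * adapter only HBR2 x2, the IN adapter's DP_REMOTE_CAP ends up holding
 * HBR2 x2 (possibly reduced further to honor max_up/max_down), which
 * bounds what the DP source can negotiate over the tunnel.
 */
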
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 20;

		/*
		 * Wait for DPRX done. Normally it should be already set
		 * for active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		if (!timeout)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}

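/*
 * E.g. a DP IN adapter reporting 16 total buffers gets
 * min(16 - 2, 12) = 12 non-flow-controlled credits for the video
 * path, keeping at least two buffers free for the AUX paths.
 */
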
/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_up,
				     int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static u32 tb_dma_credits(struct tb_port *nhi)
{
	u32 max_credits;

	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	return min(max_credits, 13U);
}

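/*
 * E.g. an NHI advertising 32 total buffers is still capped at
 * min(32, 13) = 13 credits per hop, leaving buffer space for other
 * paths through the host interface adapter.
 */
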
static void tb_dma_init_path(struct tb_path *path, unsigned int efc, u32 credits)
{
	int i;

	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %0 if TX path is not needed.
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %0 if RX path is not needed.
 * @receive_path: HopID used for receiving packets
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_ring,
				      int transmit_path, int receive_ring,
				      int receive_path)
{
	struct tb_tunnel *tunnel;
	size_t npaths = 0, i = 0;
	struct tb_path *path;
	u32 credits;

	if (receive_ring)
		npaths++;
	if (transmit_ring)
		npaths++;

	if (WARN_ON(!npaths))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->src_port = nhi;
	tunnel->dst_port = dst;

	credits = tb_dma_credits(nhi);

	if (receive_ring) {
		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
				     "DMA RX");
		if (!path) {
			tb_tunnel_free(tunnel);
			return NULL;
		}
		tb_dma_init_path(path, TB_PATH_SOURCE | TB_PATH_INTERNAL, credits);
		tunnel->paths[i++] = path;
	}

	if (transmit_ring) {
		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
				     "DMA TX");
		if (!path) {
			tb_tunnel_free(tunnel);
			return NULL;
		}
		tb_dma_init_path(path, TB_PATH_ALL, credits);
		tunnel->paths[i++] = path;
	}

	return tunnel;
}

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	int pcie_enabled = tb_acpi_may_tunnel_pcie();

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
	 * take it into account here.
	 */
	*consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
	*consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
	return 0;
}

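/*
 * E.g. with 900 Mb/s allocated in each direction and PCIe tunneling
 * enabled this reports 900 * 4 / 3 = 1200 Mb/s consumed; with PCIe
 * tunneling disabled the factor is 3/3 and 900 Mb/s is reported as is.
 */
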
static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
		return;
	} else if (!ret) {
		/* Use maximum link rate if the link valid is not set */
		ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
		if (ret < 0) {
			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
			return;
		}
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}

static void tb_usb3_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 3;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down");
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}

/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}