// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ktime.h>
#include <linux/string_helpers.h>

#include "tunnel.h"
#include "tb.h"
/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

#define TB_PCI_PRIORITY			3
#define TB_PCI_WEIGHT			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

#define TB_USB3_PRIORITY		3
#define TB_USB3_WEIGHT			2

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DP_VIDEO_PRIORITY		1
#define TB_DP_VIDEO_WEIGHT		1

#define TB_DP_AUX_PRIORITY		2
#define TB_DP_AUX_WEIGHT		1

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS			14
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1

#define TB_DMA_PRIORITY			5
#define TB_DMA_WEIGHT			1

/*
 * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
 * according to USB4 v2 Connection Manager guide. This ends up reserving
 * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
 * account.
 */
#define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)
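/*
 * Illustrative arithmetic (not part of the driver): with the weights
 * defined above these reservations work out to
 *
 *	USB4_V2_PCI_MIN_BANDWIDTH  = 1500 * 1 = 1500 Mb/s
 *	USB4_V2_USB3_MIN_BANDWIDTH = 1500 * 2 = 3000 Mb/s
 *
 * which matches the 1500/3000 Mb/s figures quoted in the comment above.
 */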
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
		__MODULE_STRING(TB_DMA_CREDITS) ")");
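/*
 * Example usage (assuming the driver is built as the "thunderbolt"
 * module): the default can be overridden at load time, e.g.
 * "modprobe thunderbolt dma_credits=8", or with
 * "thunderbolt.dma_credits=8" on the kernel command line. The 0444
 * permissions make the parameter read-only at runtime.
 */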
static bool bw_alloc_mode = true;
module_param(bw_alloc_mode, bool, 0444);
MODULE_PARM_DESC(bw_alloc_mode,
		 "enable bandwidth allocation mode if supported (default: true)");

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
	return port->total_credits - port->ctl_credits;
}
/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
 *		    streams possible through this lane adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
					 size_t *max_dp_streams)
{
	const struct tb_switch *sw = port->sw;
	int credits, usb3, pcie, spare;
	size_t ndp;

	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

	if (tb_acpi_is_xdomain_allowed()) {
		spare = min_not_zero(sw->max_dma_credits, dma_credits);
		/* Add some credits for potential second DMA tunnel */
		spare += TB_MIN_DMA_CREDITS;
	} else {
		spare = 0;
	}

	credits = tb_usable_credits(port);
	if (tb_acpi_may_tunnel_dp()) {
		/*
		 * Maximum number of DP streams possible through the
		 * lane adapter.
		 */
		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
			ndp = (credits - (usb3 + pcie + spare)) /
			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
		else
			ndp = 0;
	} else {
		ndp = 0;
	}
	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	credits -= usb3;

	if (max_dp_streams)
		*max_dp_streams = ndp;

	return credits > 0 ? credits : 0;
}
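/*
 * Worked example with made-up numbers: for a lane adapter with 60
 * usable credits where USB3 claims 20, PCIe 6 and the DMA spare is 15,
 * and min_dp_aux_credits + min_dp_main_credits = 19, at most
 * (60 - 41) / 19 = 1 DP stream fits, and 60 - 19 - 20 = 21 credits are
 * reported as available for PCIe and DMA paths.
 */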
static void tb_init_pm_support(struct tb_path_hop *hop)
{
	struct tb_port *out_port = hop->out_port;
	struct tb_port *in_port = hop->in_port;

	if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
	    usb4_switch_version(in_port->sw) >= 2)
		hop->pm_support = true;
}
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
	int ret;

	/* Only supported if both routers are at least USB4 v2 */
	if (tb_port_get_link_generation(port) < 4)
		return 0;

	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
	if (ret)
		return ret;

	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
		      str_enabled_disabled(enable));
	return 0;
}
static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	if (activate) {
		res = tb_pci_set_ext_encapsulation(tunnel, activate);
		if (res)
			return res;
	}

	if (activate)
		res = tb_pci_port_enable(tunnel->dst_port, activate);
	else
		res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (activate) {
		res = tb_pci_port_enable(tunnel->src_port, activate);
		if (res)
			return res;
	} else {
		/* Downstream router could be unplugged */
		tb_pci_port_enable(tunnel->dst_port, activate);
	}

	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
}
static int tb_pci_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available;

		available = tb_available_credits(port, NULL);
		credits = min(sw->max_pcie_credits, available);

		if (credits < TB_MIN_PCIE_CREDITS)
			return -ENOSPC;

		credits = max(TB_MIN_PCIE_CREDITS, credits);
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
	return 0;
}
static int tb_pci_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_PCI_PRIORITY;
	path->weight = TB_PCI_WEIGHT;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_pci_init_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
		goto err_free;

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
		goto err_deactivate;

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}
/**
 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
/**
 * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
 * @port: Lane 0 adapter
 * @reserved_up: Upstream bandwidth in Mb/s to reserve
 * @reserved_down: Downstream bandwidth in Mb/s to reserve
 *
 * Can be called for any connected lane 0 adapter to find out how much
 * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
 * Returns true if there is something to be reserved and writes the
 * amount to @reserved_down/@reserved_up. Otherwise returns false and
 * does not touch the parameters.
 */
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
			    int *reserved_down)
{
	if (WARN_ON_ONCE(!port->remote))
		return false;

	if (!tb_acpi_may_tunnel_pcie())
		return false;

	if (tb_port_get_link_generation(port) < 4)
		return false;

	/* Must have PCIe adapters */
	if (tb_is_upstream_port(port)) {
		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
			return false;
		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
			return false;
	} else {
		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
			return false;
		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
			return false;
	}

	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;

	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
		    *reserved_down);
	return true;
}
static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
			      int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
/*
 * Returns maximum possible rate from capability supporting only DP 2.0
 * and below. Used when DP BW allocation mode is not enabled.
 */
static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

/*
 * Returns maximum possible rate from capability supporting DP 2.1
 * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
 * mode is enabled.
 */
static inline u32 tb_dp_cap_get_rate_ext(u32 val)
{
	if (val & DP_COMMON_CAP_UHBR20)
		return 20000;
	else if (val & DP_COMMON_CAP_UHBR13_5)
		return 13500;
	else if (val & DP_COMMON_CAP_UHBR10)
		return 10000;

	return tb_dp_cap_get_rate(val);
}
static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
{
	return rate >= 10000;
}
static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}
static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}
static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}
static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b (or 128b/132b) encoding */
	if (tb_dp_is_uhbr_rate(rate))
		return rate * lanes * 128 / 132;
	return rate * lanes * 8 / 10;
}
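/*
 * Worked example (illustrative): HBR2 is 5400 Mb/s per lane, so
 * tb_dp_bandwidth(5400, 4) = 5400 * 4 * 8 / 10 = 17280 Mb/s, matching
 * the dp_bw[] table below. For UHBR10 x4 the 128b/132b branch gives
 * tb_dp_bandwidth(10000, 4) = 10000 * 4 * 128 / 132 = 38787 Mb/s
 * (integer division).
 */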
static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}
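/*
 * Example (illustrative): with max_bw = 10000 Mb/s and both adapters
 * capable of HBR3 x4 (25920 Mb/s), the first dp_bw[] entry that fits
 * is { 2700, 4 } = 8640 Mb/s, so the tunnel would be reduced to HBR x4.
 */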
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out, 3000);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_tunnel_dbg(tunnel,
		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_tunnel_dbg(tunnel,
		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      out_rate, out_lanes, bw);

	if (tb_port_path_direction_downstream(in, out))
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_tunnel_info(tunnel, "not enough bandwidth\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_tunnel_dbg(tunnel,
			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			      new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	/*
	 * Titan Ridge does not disable AUX timers when it gets
	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
	 * DP tunneling.
	 */
	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}
static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
{
	int ret, estimated_bw, granularity, tmp;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	u32 out_dp_cap, out_rate, out_lanes;
	u32 in_dp_cap, in_rate, in_lanes;
	u32 rate, lanes;

	if (!bw_alloc_mode)
		return 0;

	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_group_id(in, in->group->index);
	if (ret)
		return ret;

	/*
	 * Get the non-reduced rate and lanes based on the lowest
	 * capability of both adapters.
	 */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);

	rate = min(in_rate, out_rate);
	lanes = min(in_lanes, out_lanes);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tmp);

	ret = usb4_dp_port_set_nrd(in, rate, lanes);
	if (ret)
		return ret;

	/*
	 * Pick up granularity that supports maximum possible bandwidth.
	 * For that we use the UHBR rates too.
	 */
	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
	rate = min(in_rate, out_rate);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_tunnel_dbg(tunnel,
		      "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tmp);

	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
	     granularity *= 2)
		;

	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);

	/*
	 * Returns -EINVAL if granularity above is outside of the
	 * accepted ranges.
	 */
	ret = usb4_dp_port_set_granularity(in, granularity);
	if (ret)
		return ret;

	/*
	 * Bandwidth estimation is pretty much what we have in
	 * max_up/down fields. For discovery we just read what the
	 * estimation was set to.
	 */
	if (tb_port_path_direction_downstream(in, out))
		estimated_bw = tunnel->max_down;
	else
		estimated_bw = tunnel->max_up;

	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);

	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
	if (ret)
		return ret;

	/* Initial allocation should be 0 according to the spec */
	ret = usb4_dp_port_allocate_bandwidth(in, 0);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
	return 0;
}
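/*
 * Illustrative example (not part of the driver): the granularity loop
 * above doubles the granularity until the maximum bandwidth fits the
 * 8-bit allocation field. For UHBR10 x4, tb_dp_bandwidth(10000, 4) =
 * 38787 Mb/s and 38787 / 250 = 155 <= 255, so the initial 250 Mb/s
 * granularity is kept. A hypothetical 77000 Mb/s link would need
 * 77000 / 250 = 308 > 255 and would step up to 500 Mb/s granularity.
 */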
static int tb_dp_init(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;
	struct tb_switch *sw = in->sw;
	struct tb *tb = in->sw->tb;
	int ret;

	ret = tb_dp_xchg_caps(tunnel);
	if (ret)
		return ret;

	if (!tb_switch_is_usb4(sw))
		return 0;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return 0;

	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");

	ret = usb4_dp_port_set_cm_id(in, tb->index);
	if (ret)
		return ret;

	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}
static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return;
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
	}
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}
/* max_bw is rounded up to the next granularity (Mb/s) */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
						  int *max_bw)
{
	struct tb_port *in = tunnel->src_port;
	int ret, rate, lanes, nrd_bw;
	u32 cap;

	/*
	 * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
	 * read parameter values so we can use this to determine the
	 * maximum possible bandwidth over this link.
	 *
	 * See USB4 v2 spec 1.0 10.4.4.5.
	 */
	ret = tb_port_read(in, &cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	rate = tb_dp_cap_get_rate_ext(cap);
	if (tb_dp_is_uhbr_rate(rate)) {
		/*
		 * When UHBR is used there is no reduction in lanes so
		 * we can use this directly.
		 */
		lanes = tb_dp_cap_get_lanes(cap);
	} else {
		/*
		 * If there is no UHBR supported then check the
		 * non-reduced rate and lanes.
		 */
		ret = usb4_dp_port_nrd(in, &rate, &lanes);
		if (ret)
			return ret;
	}

	nrd_bw = tb_dp_bandwidth(rate, lanes);

	if (max_bw) {
		ret = usb4_dp_port_granularity(in);
		if (ret < 0)
			return ret;
		*max_bw = roundup(nrd_bw, ret);
	}

	return nrd_bw;
}
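/*
 * Example (illustrative): for non-UHBR HBR x4, nrd_bw =
 * tb_dp_bandwidth(2700, 4) = 8640 Mb/s; with 250 Mb/s granularity
 * roundup(8640, 250) = 8750 Mb/s is stored in @max_bw.
 */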
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
						   int *consumed_up,
						   int *consumed_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, allocated_bw, max_bw;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	if (!tunnel->bw_mode)
		return -EOPNOTSUPP;

	/* Read what was allocated previously if any */
	ret = usb4_dp_port_allocated_bandwidth(in);
	if (ret < 0)
		return ret;
	allocated_bw = ret;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;
	if (allocated_bw == max_bw)
		allocated_bw = ret;

	if (tb_port_path_direction_downstream(in, out)) {
		*consumed_up = 0;
		*consumed_down = allocated_bw;
	} else {
		*consumed_up = allocated_bw;
		*consumed_down = 0;
	}

	return 0;
}
static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				     int *allocated_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;

	/*
	 * If we have already set the allocated bandwidth then use that.
	 * Otherwise we read it from the DPRX.
	 */
	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
		int ret, allocated_bw, max_bw;

		ret = usb4_dp_port_allocated_bandwidth(in);
		if (ret < 0)
			return ret;
		allocated_bw = ret;

		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
		if (ret < 0)
			return ret;
		if (allocated_bw == max_bw)
			allocated_bw = ret;

		if (tb_port_path_direction_downstream(in, out)) {
			*allocated_up = 0;
			*allocated_down = allocated_bw;
		} else {
			*allocated_up = allocated_bw;
			*allocated_down = 0;
		}
		return 0;
	}

	return tunnel->consumed_bandwidth(tunnel, allocated_up,
					  allocated_down);
}
static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
				 int *alloc_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int max_bw, ret, tmp;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;

	if (tb_port_path_direction_downstream(in, out)) {
		tmp = min(*alloc_down, max_bw);
		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
		if (ret)
			return ret;
		*alloc_down = tmp;
		*alloc_up = 0;
	} else {
		tmp = min(*alloc_up, max_bw);
		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
		if (ret)
			return ret;
		*alloc_down = 0;
		*alloc_up = tmp;
	}

	/* Now we can use BW mode registers to figure out the bandwidth */
	/* TODO: need to handle discovery too */
	tunnel->bw_mode = true;
	return 0;
}
static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	struct tb_port *in = tunnel->src_port;

	/*
	 * Wait for DPRX done. Normally it should be already set for
	 * active tunnel.
	 */
	do {
		u32 val;
		int ret;

		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_COMMON_CAP, 1);
		if (ret)
			return ret;

		if (val & DP_COMMON_CAP_DPRX_DONE) {
			tb_tunnel_dbg(tunnel, "DPRX read done\n");
			return 0;
		}
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
	return -ETIMEDOUT;
}
/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
			  u32 *lanes)
{
	struct tb_port *in = tunnel->src_port;
	u32 val;
	int ret;

	switch (cap) {
	case DP_LOCAL_CAP:
	case DP_REMOTE_CAP:
	case DP_COMMON_CAP:
		break;

	default:
		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
		return -EINVAL;
	}

	/*
	 * Read from the copied remote cap so that we take into account
	 * if capabilities were reduced during exchange.
	 */
	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
	if (ret)
		return ret;

	*rate = tb_dp_cap_get_rate(val);
	*lanes = tb_dp_cap_get_lanes(val);
	return 0;
}
static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				   int *max_down)
{
	struct tb_port *in = tunnel->src_port;
	int ret;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
	if (ret < 0)
		return ret;

	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
		*max_up = 0;
		*max_down = ret;
	} else {
		*max_up = ret;
		*max_down = 0;
	}

	return 0;
}
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		/*
		 * On USB4 routers check if the bandwidth allocation
		 * mode is enabled first and then read the bandwidth
		 * through those registers.
		 */
		ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
							      consumed_down);
		if (ret < 0) {
			if (ret != -EOPNOTSUPP)
				return ret;
		} else if (!ret) {
			return 0;
		}
		/*
		 * Then see if the DPRX negotiation is ready and if yes
		 * return that bandwidth (it may be smaller than the
		 * reduced one). Otherwise return the remote (possibly
		 * reduced) caps.
		 */
		ret = tb_dp_wait_dprx(tunnel, 150);
		if (ret) {
			if (ret == -ETIMEDOUT)
				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
						     &rate, &lanes);
		} else {
			ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
		}
		if (ret)
			return ret;
	} else if (sw->generation >= 2) {
		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
		if (ret)
			return ret;
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}
static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port))
		hop->initial_credits = sw->min_dp_aux_credits;
	else
		hop->initial_credits = 1;
}
static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DP_AUX_PRIORITY;
	path->weight = TB_DP_AUX_WEIGHT;

	tb_path_for_each_hop(path, hop) {
		tb_dp_init_aux_credits(hop);
		if (pm_support)
			tb_init_pm_support(hop);
	}
}
static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int nfc_credits;
		size_t max_dp_streams;

		tb_available_credits(port, &max_dp_streams);
		/*
		 * Read the number of currently allocated NFC credits
		 * from the lane adapter. Since we only use them for DP
		 * tunneling we can use that to figure out how many DP
		 * tunnels already go through the lane adapter.
		 */
		nfc_credits = port->config.nfc_credits &
				ADP_CS_4_NFC_BUFFERS_MASK;
		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
			return -ENOSPC;

		hop->nfc_credits = sw->min_dp_main_credits;
	} else {
		hop->nfc_credits = min(port->total_credits - 2, 12U);
	}

	return 0;
}
static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DP_VIDEO_PRIORITY;
	path->weight = TB_DP_VIDEO_WEIGHT;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dp_init_video_credits(hop);
		if (ret)
			return ret;
		if (pm_support)
			tb_init_pm_support(hop);
	}

	return 0;
}
static void tb_dp_dump(struct tb_tunnel *tunnel)
{
	struct tb_port *in, *out;
	u32 dp_cap, rate, lanes;

	in = tunnel->src_port;
	out = tunnel->dst_port;

	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
			 in->cap_adap + DP_LOCAL_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel,
		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));

	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
			 out->cap_adap + DP_LOCAL_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel,
		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));

	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
			 in->cap_adap + DP_REMOTE_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));
}
/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video", alloc_hopid);
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
		goto err_free;

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
				alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_dp_dump(tunnel);

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}
/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @link_nr: Preferred lane adapter when the link is not bonded
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;
	bool pm_support;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;
	pm_support = usb4_switch_version(in->sw) >= 2;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     link_nr, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, pm_support);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path, pm_support);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path, pm_support);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
	const struct tb_switch *sw = port->sw;
	int credits;

	credits = tb_available_credits(port, NULL);
	if (tb_acpi_may_tunnel_pcie())
		credits -= sw->max_pcie_credits;
	credits -= port->dma_credits;

	return credits > 0 ? credits : 0;
}
static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available = tb_dma_available_credits(port);

		/*
		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
		 * DMA path cannot be established.
		 */
		if (available < TB_MIN_DMA_CREDITS)
			return -ENOSPC;

		while (credits > available)
			credits--;

		tb_port_dbg(port, "reserving %u credits for DMA path\n",
			    credits);

		port->dma_credits += credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 14 : 6;
		else
			credits = min(port->total_credits, credits);
	}

	hop->initial_credits = credits;
	return 0;
}
/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;
	unsigned int i, tmp;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DMA_PRIORITY;
	path->weight = TB_DMA_WEIGHT;
	path->clear_fc = true;

	/*
	 * First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so can use all
	 * the credits (except the ones reserved for control traffic).
	 */
	hop = &path->hops[0];
	tmp = min(tb_usable_credits(hop->in_port), credits);
	hop->initial_credits = tmp;
	hop->in_port->dma_credits += tmp;

	for (i = 1; i < path->path_length; i++) {
		int ret;

		ret = tb_dma_reserve_credits(&path->hops[i], credits);
		if (ret)
			return ret;
	}

	return 0;
}
/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_ALL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DMA_PRIORITY;
	path->weight = TB_DMA_WEIGHT;
	path->clear_fc = true;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dma_reserve_credits(hop, credits);
		if (ret)
			return ret;
	}

	return 0;
}
static void tb_dma_release_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		port->dma_credits -= hop->initial_credits;

		tb_port_dbg(port, "released %u DMA path credits\n",
			    hop->initial_credits);
	}
}

static void tb_dma_deinit_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	tb_path_for_each_hop(path, hop)
		tb_dma_release_credits(hop);
}
static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		tb_dma_deinit_path(tunnel->paths[i]);
	}
}
/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %-1 if RX path is not needed.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring)
{
	struct tb_tunnel *tunnel;
	size_t npaths = 0, i = 0;
	struct tb_path *path;
	int credits;

	/* Ring 0 is reserved for control channel */
	if (WARN_ON(!receive_ring || !transmit_ring))
		return NULL;

	if (receive_ring > 0)
		npaths++;
	if (transmit_ring > 0)
		npaths++;

	if (WARN_ON(!npaths))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->src_port = nhi;
	tunnel->dst_port = dst;
	tunnel->deinit = tb_dma_deinit;

	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);

	if (receive_ring > 0) {
		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
				     "DMA RX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_rx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
			goto err_free;
		}
	}

	if (transmit_ring > 0) {
		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
				     "DMA TX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_tx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
			goto err_free;
		}
	}

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Pass %-1 to ignore.
 *
 * This function can be used to match specific DMA tunnel, if there are
 * multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring)
{
	const struct tb_path *tx_path = NULL, *rx_path = NULL;
	int i;

	if (!receive_ring || !transmit_ring)
		return false;

	for (i = 0; i < tunnel->npaths; i++) {
		const struct tb_path *path = tunnel->paths[i];

		if (!path)
			continue;

		if (tb_port_is_nhi(path->hops[0].in_port))
			tx_path = path;
		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
			rx_path = path;
	}

	if (transmit_ring > 0 || transmit_path > 0) {
		if (!tx_path)
			return false;
		if (transmit_ring > 0 &&
		    (tx_path->hops[0].in_hop_index != transmit_ring))
			return false;
		if (transmit_path > 0 &&
		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
			return false;
	}

	if (receive_ring > 0 || receive_path > 0) {
		if (!rx_path)
			return false;
		if (receive_path > 0 &&
		    (rx_path->hops[0].in_hop_index != receive_path))
			return false;
		if (receive_ring > 0 &&
		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
			return false;
	}

	return true;
}
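/*
 * Example (hypothetical values): tb_tunnel_match_dma(tunnel, -1, 3, -1, -1)
 * matches any DMA tunnel whose TX ring is 3 regardless of the TX HopID
 * and the RX end, while tb_tunnel_match_dma(tunnel, 8, 3, 8, 3) requires
 * all four values to match.
 */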
static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}
static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}
static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}
static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
	 * take that into account here.
	 */
	*consumed_up = tunnel->allocated_up *
		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
	*consumed_down = tunnel->allocated_down *
		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;

	if (tb_port_get_link_generation(port) >= 4) {
		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
	}

	return 0;
}
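/*
 * Worked example (illustrative): with TB_USB3_WEIGHT 2 and
 * TB_PCI_WEIGHT 1, a 2000 Mb/s allocation is reported as
 * 2000 * (2 + 1) / 2 = 3000 Mb/s consumed when PCIe tunneling is
 * enabled, leaving headroom for PCIe bulk traffic on the same link.
 */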
static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}
static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
		return;
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}
static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		credits = sw->max_usb3_credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
}
static void tb_usb3_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_USB3_PRIORITY;
	path->weight = TB_USB3_WEIGHT;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop)
		tb_usb3_init_credits(hop);
}
/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}
/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}
/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	if (tunnel->deinit)
		tunnel->deinit(tunnel);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}
/**
 * tb_tunnel_is_invalid - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}
/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}
/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}
/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}
/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}
static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}
/**
 * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
 * @tunnel: Tunnel to check
 * @max_up: Maximum upstream bandwidth in Mb/s
 * @max_down: Maximum downstream bandwidth in Mb/s
 *
 * Returns the maximum possible bandwidth this tunnel can use if not
 * limited by other bandwidth clients. If the tunnel does not support
 * this, returns %-EOPNOTSUPP.
 */
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				int *max_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->maximum_bandwidth)
		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
	return -EOPNOTSUPP;
}
/**
 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
 * @tunnel: Tunnel to check
 * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
 * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
 *		    stored here
 *
 * Returns the bandwidth allocated for the tunnel. This may be higher
 * than what the tunnel actually consumes.
 */
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				  int *allocated_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->allocated_bandwidth)
		return tunnel->allocated_bandwidth(tunnel, allocated_up,
						   allocated_down);
	return -EOPNOTSUPP;
}
/**
 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
 * @tunnel: Tunnel whose bandwidth allocation to change
 * @alloc_up: New upstream bandwidth in Mb/s
 * @alloc_down: New downstream bandwidth in Mb/s
 *
 * Tries to change tunnel bandwidth allocation. If successful returns %0
 * and updates @alloc_up and @alloc_down to what was actually allocated
 * (it may not be the same as passed originally). Returns negative errno
 * in case of failure.
 */
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
			      int *alloc_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->alloc_bandwidth)
		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);

	return -EOPNOTSUPP;
}
/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}
/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}

const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
{
	return tb_tunnel_names[tunnel->type];
}