// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ktime.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS			14U
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1U

static bool bw_alloc_mode = true;
module_param(bw_alloc_mode, bool, 0444);
MODULE_PARM_DESC(bw_alloc_mode,
		 "enable bandwidth allocation mode if supported (default: true)");

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],                  \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
	return port->total_credits - port->ctl_credits;
}

/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
 *		    streams possible through this lane adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
					 size_t *max_dp_streams)
{
	const struct tb_switch *sw = port->sw;
	int credits, usb3, pcie, spare;
	size_t ndp;

	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

	if (tb_acpi_is_xdomain_allowed()) {
		spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
		/* Add some credits for potential second DMA tunnel */
		spare += TB_MIN_DMA_CREDITS;
	} else {
		spare = 0;
	}

	credits = tb_usable_credits(port);
	if (tb_acpi_may_tunnel_dp()) {
		/*
		 * Maximum number of DP streams possible through the
		 * lane adapter.
		 */
		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
			ndp = (credits - (usb3 + pcie + spare)) /
			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
		else
			ndp = 0;
	} else {
		ndp = 0;
	}
	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	credits -= usb3;

	if (max_dp_streams)
		*max_dp_streams = ndp;

	return credits > 0 ? credits : 0;
}
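
/*
 * Worked example with purely hypothetical numbers: if the lane adapter
 * has 60 total credits of which 2 are reserved for control traffic,
 * tb_usable_credits() returns 58. With usb3 = 14, pcie = 6 and
 * spare = 15 (14 + TB_MIN_DMA_CREDITS), one DP stream costing
 * min_dp_aux_credits + min_dp_main_credits = 19 credits gives
 * ndp = (58 - 35) / 19 = 1, and the returned budget for PCIe/DMA is
 * 58 - 1 * 19 - 14 = 25 credits.
 */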

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_pci_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available;

		available = tb_available_credits(port, NULL);
		credits = min(sw->max_pcie_credits, available);

		if (credits < TB_MIN_PCIE_CREDITS)
			return -ENOSPC;

		credits = max(TB_MIN_PCIE_CREDITS, credits);
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
	return 0;
}

static int tb_pci_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_pci_init_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
		goto err_free;

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
		goto err_deactivate;

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
			      int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}
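
/*
 * For example, HBR2 x4 (5400 Mb/s per lane, four lanes) yields
 * 5400 * 4 * 8 / 10 = 17280 Mb/s on the fabric, matching the dp_bw[]
 * table below; the 8/10 factor is there because the 8b/10b line
 * encoding overhead does not travel through the tunnel.
 */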

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}
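
/*
 * Example: with max_bw = 10000 Mb/s, a DP IN capable of 8100 Mb/s x4
 * and a DP OUT capable of 5400 Mb/s x4, the walk above skips 5400 x4
 * (17280 Mb/s is over the limit) and everything the OUT adapter cannot
 * do, and settles on 2700 Mb/s x4 = 8640 Mb/s, the highest entry that
 * both adapters support and that fits under the limit.
 */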

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out, 1500);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	/*
	 * Titan Ridge does not disable AUX timers when it gets
	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
	 * DP tunneling.
	 */
	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
		tb_port_dbg(out, "disabling LTTPR\n");
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
{
	int ret, estimated_bw, granularity, tmp;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	u32 out_dp_cap, out_rate, out_lanes;
	u32 in_dp_cap, in_rate, in_lanes;
	u32 rate, lanes;

	if (!bw_alloc_mode)
		return 0;

	ret = usb4_dp_port_set_cm_bw_mode_supported(in, true);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_group_id(in, in->group->index);
	if (ret)
		return ret;

	/*
	 * Get the non-reduced rate and lanes based on the lowest
	 * capability of both adapters.
	 */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);

	rate = min(in_rate, out_rate);
	lanes = min(in_lanes, out_lanes);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
		    lanes, tmp);

	ret = usb4_dp_port_set_nrd(in, rate, lanes);
	if (ret)
		return ret;

	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
	     granularity *= 2)
		;
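
	/*
	 * The loop above picks the smallest granularity (starting at
	 * 250 Mb/s and doubling) for which the non-reduced bandwidth
	 * still fits into the 255 units the allocation registers appear
	 * to be able to express. E.g. tmp = 17280 Mb/s already fits at
	 * 250 Mb/s granularity since 17280 / 250 = 69 <= 255.
	 */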

	tb_port_dbg(in, "granularity %d Mb/s\n", granularity);

	/*
	 * Returns -EINVAL if granularity above is outside of the
	 * accepted ranges.
	 */
	ret = usb4_dp_port_set_granularity(in, granularity);
	if (ret)
		return ret;

	/*
	 * Bandwidth estimation is pretty much what we have in
	 * max_up/down fields. For discovery we just read what the
	 * estimation was set to.
	 */
	if (in->sw->config.depth < out->sw->config.depth)
		estimated_bw = tunnel->max_down;
	else
		estimated_bw = tunnel->max_up;

	tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);

	ret = usb4_dp_port_set_estimated_bw(in, estimated_bw);
	if (ret)
		return ret;

	/* Initial allocation should be 0 according to the spec */
	ret = usb4_dp_port_allocate_bw(in, 0);
	if (ret)
		return ret;

	tb_port_dbg(in, "bandwidth allocation mode enabled\n");
	return 0;
}

static int tb_dp_init(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;
	struct tb_switch *sw = in->sw;
	struct tb *tb = in->sw->tb;
	int ret;

	ret = tb_dp_xchg_caps(tunnel);
	if (ret)
		return ret;

	if (!tb_switch_is_usb4(sw))
		return 0;

	if (!usb4_dp_port_bw_mode_supported(in))
		return 0;

	tb_port_dbg(in, "bandwidth allocation mode supported\n");

	ret = usb4_dp_port_set_cm_id(in, tb->index);
	if (ret)
		return ret;

	return tb_dp_bw_alloc_mode_enable(tunnel);
}

static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;

	if (!usb4_dp_port_bw_mode_supported(in))
		return;
	if (usb4_dp_port_bw_mode_enabled(in)) {
		usb4_dp_port_set_cm_bw_mode_supported(in, false);
		tb_port_dbg(in, "bandwidth allocation mode disabled\n");
	}
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

/* max_bw is rounded up to next granularity */
static int tb_dp_nrd_bandwidth(struct tb_tunnel *tunnel, int *max_bw)
{
	struct tb_port *in = tunnel->src_port;
	int ret, rate, lanes, nrd_bw;

	ret = usb4_dp_port_nrd(in, &rate, &lanes);
	if (ret)
		return ret;

	nrd_bw = tb_dp_bandwidth(rate, lanes);

	if (max_bw) {
		ret = usb4_dp_port_granularity(in);
		if (ret < 0)
			return ret;
		*max_bw = roundup(nrd_bw, ret);
	}

	return nrd_bw;
}
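
/*
 * Example: a non-reduced rate of 2700 Mb/s x4 gives nrd_bw = 8640 Mb/s;
 * with a granularity of 250 Mb/s the rounded-up *max_bw becomes
 * roundup(8640, 250) = 8750 Mb/s.
 */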

static int tb_dp_bw_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
					    int *consumed_up, int *consumed_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, allocated_bw, max_bw;

	if (!usb4_dp_port_bw_mode_enabled(in))
		return -EOPNOTSUPP;

	if (!tunnel->bw_mode)
		return -EOPNOTSUPP;

	/* Read what was allocated previously if any */
	ret = usb4_dp_port_allocated_bw(in);
	if (ret < 0)
		return ret;
	allocated_bw = ret;

	ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;
	if (allocated_bw == max_bw)
		allocated_bw = ret;

	tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
		    allocated_bw);

	if (in->sw->config.depth < out->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = allocated_bw;
	} else {
		*consumed_up = allocated_bw;
		*consumed_down = 0;
	}

	return 0;
}

static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				     int *allocated_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;

	/*
	 * If we have already set the allocated bandwidth then use that.
	 * Otherwise we read it from the DPRX.
	 */
	if (usb4_dp_port_bw_mode_enabled(in) && tunnel->bw_mode) {
		int ret, allocated_bw, max_bw;

		ret = usb4_dp_port_allocated_bw(in);
		if (ret < 0)
			return ret;
		allocated_bw = ret;

		ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
		if (ret < 0)
			return ret;
		if (allocated_bw == max_bw)
			allocated_bw = ret;

		if (in->sw->config.depth < out->sw->config.depth) {
			*allocated_up = 0;
			*allocated_down = allocated_bw;
		} else {
			*allocated_up = allocated_bw;
			*allocated_down = 0;
		}
		return 0;
	}

	return tunnel->consumed_bandwidth(tunnel, allocated_up,
					  allocated_down);
}

static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
				 int *alloc_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int max_bw, ret, tmp;

	if (!usb4_dp_port_bw_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;

	if (in->sw->config.depth < out->sw->config.depth) {
		tmp = min(*alloc_down, max_bw);
		ret = usb4_dp_port_allocate_bw(in, tmp);
		if (ret)
			return ret;
		*alloc_down = tmp;
		*alloc_up = 0;
	} else {
		tmp = min(*alloc_up, max_bw);
		ret = usb4_dp_port_allocate_bw(in, tmp);
		if (ret)
			return ret;
		*alloc_down = 0;
		*alloc_up = tmp;
	}

	/* Now we can use BW mode registers to figure out the bandwidth */
	/* TODO: need to handle discovery too */
	tunnel->bw_mode = true;
	return 0;
}

static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
			   int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	struct tb_port *in = tunnel->src_port;

	/*
	 * Wait for DPRX done. Normally it should be already set for
	 * active tunnel.
	 */
	do {
		u32 val;
		int ret;

		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_COMMON_CAP, 1);
		if (ret)
			return ret;

		if (val & DP_COMMON_CAP_DPRX_DONE) {
			*rate = tb_dp_cap_get_rate(val);
			*lanes = tb_dp_cap_get_lanes(val);

			tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
				    tb_dp_bandwidth(*rate, *lanes));
			return 0;
		}
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
			  u32 *lanes)
{
	struct tb_port *in = tunnel->src_port;
	u32 val;
	int ret;

	switch (cap) {
	case DP_LOCAL_CAP:
	case DP_REMOTE_CAP:
		break;

	default:
		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
		return -EINVAL;
	}

	/*
	 * Read from the copied remote cap so that we take into account
	 * if capabilities were reduced during exchange.
	 */
	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
	if (ret)
		return ret;

	*rate = tb_dp_cap_get_rate(val);
	*lanes = tb_dp_cap_get_lanes(val);

	tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
		    tb_dp_bandwidth(*rate, *lanes));
	return 0;
}

static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				   int *max_down)
{
	struct tb_port *in = tunnel->src_port;
	u32 rate, lanes;
	int ret;

	/*
	 * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX read
	 * parameter values so we can use this to determine the maximum
	 * possible bandwidth over this link.
	 */
	ret = tb_dp_read_cap(tunnel, DP_LOCAL_CAP, &rate, &lanes);
	if (ret)
		return ret;

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*max_up = 0;
		*max_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*max_up = tb_dp_bandwidth(rate, lanes);
		*max_down = 0;
	}

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		/*
		 * On USB4 routers check if the bandwidth allocation
		 * mode is enabled first and then read the bandwidth
		 * through those registers.
		 */
		ret = tb_dp_bw_mode_consumed_bandwidth(tunnel, consumed_up,
						       consumed_down);
		if (ret < 0) {
			if (ret != -EOPNOTSUPP)
				return ret;
		} else if (!ret) {
			return 0;
		}
		/*
		 * Then see if the DPRX negotiation is ready and if yes
		 * return that bandwidth (it may be smaller than the
		 * reduced one). Otherwise return the remote (possibly
		 * reduced) caps.
		 */
		ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
		if (ret) {
			if (ret == -ETIMEDOUT)
				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
						     &rate, &lanes);
			if (ret)
				return ret;
		}
	} else if (sw->generation >= 2) {
		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
		if (ret)
			return ret;
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port))
		hop->initial_credits = sw->min_dp_aux_credits;
	else
		hop->initial_credits = 1;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	tb_path_for_each_hop(path, hop)
		tb_dp_init_aux_credits(hop);
}

static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int nfc_credits;
		size_t max_dp_streams;

		tb_available_credits(port, &max_dp_streams);
		/*
		 * Read the number of currently allocated NFC credits
		 * from the lane adapter. Since we only use them for DP
		 * tunneling we can use that to figure out how many DP
		 * tunnels already go through the lane adapter.
		 */
		nfc_credits = port->config.nfc_credits &
				ADP_CS_4_NFC_BUFFERS_MASK;
		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
			return -ENOSPC;

		hop->nfc_credits = sw->min_dp_main_credits;
	} else {
		hop->nfc_credits = min(port->total_credits - 2, 12U);
	}

	return 0;
}
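
/*
 * Illustrative numbers only: with min_dp_main_credits = 18, finding 54
 * NFC credits already allocated on the lane adapter means three DP
 * tunnels go through it; if tb_available_credits() reports that only
 * two streams fit, the new video path is refused with -ENOSPC.
 */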

static int tb_dp_init_video_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dp_init_video_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video", alloc_hopid);
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
		goto err_free;

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
				alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @link_nr: Preferred lane adapter when the link is not bonded
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     link_nr, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
	const struct tb_switch *sw = port->sw;
	int credits;

	credits = tb_available_credits(port, NULL);
	if (tb_acpi_may_tunnel_pcie())
		credits -= sw->max_pcie_credits;
	credits -= port->dma_credits;

	return credits > 0 ? credits : 0;
}

static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available = tb_dma_available_credits(port);

		/*
		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
		 * DMA path cannot be established.
		 */
		if (available < TB_MIN_DMA_CREDITS)
			return -ENOSPC;

		while (credits > available)
			credits--;

		tb_port_dbg(port, "reserving %u credits for DMA path\n",
			    credits);

		port->dma_credits += credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 14 : 6;
		else
			credits = min(port->total_credits, credits);
	}

	hop->initial_credits = credits;
	return 0;
}

/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;
	unsigned int i, tmp;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	/*
	 * First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so can use all
	 * the credits (except the ones reserved for control traffic).
	 */
	hop = &path->hops[0];
	tmp = min(tb_usable_credits(hop->in_port), credits);
	hop->initial_credits = tmp;
	hop->in_port->dma_credits += tmp;

	for (i = 1; i < path->path_length; i++) {
		int ret;

		ret = tb_dma_reserve_credits(&path->hops[i], credits);
		if (ret)
			return ret;
	}

	return 0;
}

/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_ALL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dma_reserve_credits(hop, credits);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_dma_release_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		port->dma_credits -= hop->initial_credits;

		tb_port_dbg(port, "released %u DMA path credits\n",
			    hop->initial_credits);
	}
}

static void tb_dma_deinit_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	tb_path_for_each_hop(path, hop)
		tb_dma_release_credits(hop);
}

static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		tb_dma_deinit_path(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %-1 if RX path is not needed.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring)
{
	struct tb_tunnel *tunnel;
	size_t npaths = 0, i = 0;
	struct tb_path *path;
	int credits;

	if (receive_ring > 0)
		npaths++;
	if (transmit_ring > 0)
		npaths++;

	if (WARN_ON(!npaths))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->src_port = nhi;
	tunnel->dst_port = dst;
	tunnel->deinit = tb_dma_deinit;

	credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);

	if (receive_ring > 0) {
		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
				     "DMA RX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_rx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
			goto err_free;
		}
	}

	if (transmit_ring > 0) {
		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
				     "DMA TX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_tx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
			goto err_free;
		}
	}

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Pass %-1 to ignore.
 *
 * This function can be used to match specific DMA tunnel, if there are
 * multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring)
{
	const struct tb_path *tx_path = NULL, *rx_path = NULL;
	int i;

	if (!receive_ring || !transmit_ring)
		return false;

	for (i = 0; i < tunnel->npaths; i++) {
		const struct tb_path *path = tunnel->paths[i];

		if (!path)
			continue;

		if (tb_port_is_nhi(path->hops[0].in_port))
			tx_path = path;
		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
			rx_path = path;
	}

	if (transmit_ring > 0 || transmit_path > 0) {
		if (!tx_path)
			return false;
		if (transmit_ring > 0 &&
		    (tx_path->hops[0].in_hop_index != transmit_ring))
			return false;
		if (transmit_path > 0 &&
		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
			return false;
	}

	if (receive_ring > 0 || receive_path > 0) {
		if (!rx_path)
			return false;
		if (receive_path > 0 &&
		    (rx_path->hops[0].in_hop_index != receive_path))
			return false;
		if (receive_ring > 0 &&
		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
			return false;
	}

	return true;
}
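
/*
 * For example, tb_tunnel_match_dma(tunnel, -1, 1, -1, 1) matches any
 * DMA tunnel that uses NHI ring 1 in both directions regardless of the
 * HopIDs used on the XDomain link, whereas passing 0 for either ring
 * never matches.
 */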

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	int pcie_enabled = tb_acpi_may_tunnel_pcie();

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
	 * take that into account here.
	 */
	*consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
	*consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
	return 0;
}
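
/*
 * Example: with 900/3600 Mb/s allocated and PCIe tunneling enabled the
 * reported consumption is 900 * 4 / 3 = 1200 Mb/s up and
 * 3600 * 4 / 3 = 4800 Mb/s down, i.e. a third is added on top of the
 * allocation to leave room for PCIe traffic.
 */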

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
		return;
	} else if (!ret) {
		/* Use maximum link rate if the link valid is not set */
		ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
		if (ret < 0) {
			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
			return;
		}
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;
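
	/*
	 * E.g. an actual link rate of 10000 Mb/s (10 Gb/s) leaves
	 * max_rate = 10000 * 90 / 100 = 9000 Mb/s for the tunnel.
	 */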

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}

static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		credits = sw->max_usb3_credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
}

static void tb_usb3_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 3;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop)
		tb_usb3_init_credits(hop);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on an USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	if (tunnel->deinit)
		tunnel->deinit(tunnel);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
 * @tunnel: Tunnel to check
 * @max_up: Maximum upstream bandwidth in Mb/s
 * @max_down: Maximum downstream bandwidth in Mb/s
 *
 * Returns the maximum possible bandwidth this tunnel can carry if not
 * limited by other bandwidth clients. If the tunnel does not support
 * this returns %-EOPNOTSUPP.
 */
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				int *max_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->maximum_bandwidth)
		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
	return -EOPNOTSUPP;
}

/**
 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
 * @tunnel: Tunnel to check
 * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
 * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
 *		    stored here
 *
 * Returns the bandwidth allocated for the tunnel. This may be higher
 * than what the tunnel actually consumes.
 */
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				  int *allocated_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->allocated_bandwidth)
		return tunnel->allocated_bandwidth(tunnel, allocated_up,
						   allocated_down);
	return -EOPNOTSUPP;
}

/**
 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
 * @tunnel: Tunnel whose bandwidth allocation to change
 * @alloc_up: New upstream bandwidth in Mb/s
 * @alloc_down: New downstream bandwidth in Mb/s
 *
 * Tries to change tunnel bandwidth allocation. On success returns %0
 * and updates @alloc_up and @alloc_down to what was actually allocated
 * (it may not be the same as passed originally). Returns negative errno
 * in case of failure.
 */
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
			      int *alloc_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->alloc_bandwidth)
		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);

	return -EOPNOTSUPP;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}

/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}