// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#define TB_TIMEOUT		100	/* ms */
#define TB_RELEASE_BW_TIMEOUT	10000	/* ms */
/*
 * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
 * direction. This is 40G - 10% guard band bandwidth.
 */
#define TB_ASYM_MIN		(40000 * 90 / 100)
/*
 * Threshold bandwidth (in Mb/s) that is used to switch the links to
 * asymmetric and back. This is selected as 45G which means when the
 * request is higher than this, we switch the link to asymmetric, and
 * when it is less than this we switch it back. The 45G is selected so
 * that we still have 27G (of the total 72G) for bulk PCIe traffic when
 * switching back to symmetric.
 */
#define TB_ASYM_THRESHOLD	45000
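/*
 * Worked example (illustrative): a symmetric Gen 4 link runs two 40G
 * lanes per direction, i.e. 2 * 40000 = 80000 Mb/s raw, or 72000 Mb/s
 * once the 10% guard band is subtracted. With the 45000 Mb/s threshold
 * above, switching back to symmetric leaves 72000 - 45000 = 27000 Mb/s
 * for bulk PCIe traffic, matching the numbers in the comment.
 */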
#define MAX_GROUPS	7	/* max Group_ID is 7 */

static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
module_param_named(asym_threshold, asym_threshold, uint, 0444);
MODULE_PARM_DESC(asym_threshold,
		"threshold (Mb/s) when to switch Gen 4 link symmetry. 0 disables. (default: "
		__MODULE_STRING(TB_ASYM_THRESHOLD) ")");
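/*
 * Example usage (assuming the driver is built as the "thunderbolt"
 * module): asymmetric transitions can be disabled from the kernel
 * command line with thunderbolt.asym_threshold=0, or the threshold
 * raised with e.g. thunderbolt.asym_threshold=50000.
 */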
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 resume
 * @groups: Bandwidth groups used in this domain.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];
};
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}
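/*
 * Illustrative sketch (assumed layout): the connection manager private
 * data is allocated immediately after struct tb, so tb_priv() and
 * tcm_to_tb() are inverses of each other:
 *
 *	struct tb_cm *tcm = tb_priv(tb);
 *	WARN_ON(tcm_to_tb(tcm) != tb);
 */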
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}
/* enumeration & hot plug handling */
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		/*
		 * If DP IN on a device router exists, position it at the
		 * beginning of the DP resources list, so that it is used
		 * before DP IN of the host router. This way external GPU(s)
		 * will be prioritized when pairing DP IN to a DP OUT.
		 */
		if (tb_route(sw))
			list_add(&port->list, &tcm->dp_resources);
		else
			list_add_tail(&port->list, &tcm->dp_resources);

		tb_port_dbg(port, "DP IN resource available\n");
	}
}
static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}
static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}
static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}
/* Enables CL states up to host router */
static int tb_enable_clx(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	unsigned int clx = TB_CL0S | TB_CL1;
	const struct tb_tunnel *tunnel;
	int ret;

	/*
	 * Currently only enable CLx for the first link. This is enough
	 * to allow the CPU to save energy at least on Intel hardware
	 * and makes it slightly simpler to implement. We may change
	 * this in the future to cover the whole topology if it turns
	 * out to be beneficial.
	 */
	while (sw && tb_switch_depth(sw) > 1)
		sw = tb_switch_parent(sw);

	if (!sw)
		return 0;

	if (tb_switch_depth(sw) != 1)
		return 0;

	/*
	 * If we are re-enabling then check if there is an active DMA
	 * tunnel and in that case bail out.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dma(tunnel)) {
			if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
				return 0;
		}
	}

	/*
	 * Initially try with CL2. If that's not supported by the
	 * topology try with CL0s and CL1 and then give up.
	 */
	ret = tb_switch_clx_enable(sw, clx | TB_CL2);
	if (ret == -EOPNOTSUPP)
		ret = tb_switch_clx_enable(sw, clx);
	return ret == -EOPNOTSUPP ? 0 : ret;
}
/**
 * tb_disable_clx() - Disable CL states up to host router
 * @sw: Router to start
 *
 * Disables CL states from @sw up to the host router. Returns true if
 * any CL states were disabled. This can be used to figure out whether
 * the link was setup by us or the boot firmware so we don't
 * accidentally enable them if they were not enabled during discovery.
 */
static bool tb_disable_clx(struct tb_switch *sw)
{
	bool disabled = false;

	do {
		int ret;

		ret = tb_switch_clx_disable(sw);
		if (ret > 0)
			disabled = true;
		else if (ret < 0)
			tb_sw_warn(sw, "failed to disable CL states\n");

		sw = tb_switch_parent(sw);
	} while (sw);

	return disabled;
}
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (!sw)
		return 0;

	if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
		enum tb_switch_tmu_mode mode;
		int ret;

		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		else
			mode = TB_SWITCH_TMU_MODE_HIFI_BI;

		ret = tb_switch_tmu_configure(sw, mode);
		if (ret)
			return ret;

		return tb_switch_tmu_enable(sw);
	}

	return 0;
}
static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
	struct tb_switch *sw;

	if (!tunnel)
		return;

	/*
	 * Once first DP tunnel is established we change the TMU
	 * accuracy of first depth child routers (and the host router)
	 * to the highest. This is needed for the DP tunneling to work
	 * but also allows CL0s.
	 *
	 * If both routers are v2 then we don't need to do anything as
	 * they are using enhanced TMU mode that allows all CLx.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/*
	 * If both routers at the end of the link are v2 we simply
	 * enable the enhanced uni-directional mode. That covers all
	 * the CL states. For v1 and before we need to use the normal
	 * rate to allow CL1 (when supported). Otherwise we keep the TMU
	 * running at the highest accuracy.
	 */
	ret = tb_switch_tmu_configure(sw,
			TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
	if (ret == -EOPNOTSUPP) {
		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_LOWRES);
		else
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_HIFI_BI);
	}
	if (ret)
		return ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}
static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);
}
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}
/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}
static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}
static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (tb_port_path_direction_downstream(src_port, dst_port))
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
/**
 * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the consumed bandwidth is calculated for
 * @consumed_up: Consumed upstream bandwidth (Mb/s)
 * @consumed_down: Consumed downstream bandwidth (Mb/s)
 *
 * Calculates consumed USB3 and PCIe bandwidth at @port on the path
 * from @src_port to @dst_port. Does not take the tunnel starting from
 * @src_port and ending at @dst_port into account.
 */
static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
					   struct tb_port *src_port,
					   struct tb_port *dst_port,
					   struct tb_port *port,
					   int *consumed_up,
					   int *consumed_down)
{
	int pci_consumed_up, pci_consumed_down;
	struct tb_tunnel *tunnel;

	*consumed_up = *consumed_down = 0;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && tunnel->src_port != src_port &&
	    tunnel->dst_port != dst_port) {
		int ret;

		ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
						   consumed_down);
		if (ret)
			return ret;
	}

	/*
	 * If there is anything reserved for PCIe bulk traffic take it
	 * into account here too.
	 */
	if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
		*consumed_up += pci_consumed_up;
		*consumed_down += pci_consumed_down;
	}

	return 0;
}
/**
 * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the consumed bandwidth is calculated for
 * @consumed_up: Consumed upstream bandwidth (Mb/s)
 * @consumed_down: Consumed downstream bandwidth (Mb/s)
 *
 * Calculates consumed DP bandwidth at @port on the path from @src_port
 * to @dst_port. Does not take the tunnel starting from @src_port and
 * ending at @dst_port into account.
 *
 * If there is bandwidth reserved for any of the groups between
 * @src_port and @dst_port (but not yet used) that is also taken into
 * account in the returned consumed bandwidth.
 */
static int tb_consumed_dp_bandwidth(struct tb *tb,
				    struct tb_port *src_port,
				    struct tb_port *dst_port,
				    struct tb_port *port,
				    int *consumed_up,
				    int *consumed_down)
{
	int group_reserved[MAX_GROUPS] = {};
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	bool downstream;
	int i, ret;

	*consumed_up = *consumed_down = 0;

	/*
	 * Find all DP tunnels that cross the port and reduce
	 * their consumed bandwidth from the available.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		const struct tb_bandwidth_group *group;
		int dp_consumed_up, dp_consumed_down;

		if (tb_tunnel_is_invalid(tunnel))
			continue;

		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (!tb_tunnel_port_on_path(tunnel, port))
			continue;

		/*
		 * Calculate what is reserved for groups crossing the
		 * same ports only once (as that is reserved for all the
		 * tunnels in the group).
		 */
		group = tunnel->src_port->group;
		if (group && group->reserved && !group_reserved[group->index])
			group_reserved[group->index] = group->reserved;

		/*
		 * Ignore the DP tunnel between src_port and dst_port
		 * because it is the same tunnel and we may be
		 * re-calculating estimated bandwidth.
		 */
		if (tunnel->src_port == src_port &&
		    tunnel->dst_port == dst_port)
			continue;

		ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
						   &dp_consumed_down);
		if (ret)
			return ret;

		*consumed_up += dp_consumed_up;
		*consumed_down += dp_consumed_down;
	}

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
		if (downstream)
			*consumed_down += group_reserved[i];
		else
			*consumed_up += group_reserved[i];
	}

	return 0;
}
static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
			      struct tb_port *port)
{
	bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
	enum tb_link_width width;

	if (tb_is_upstream_port(port))
		width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
	else
		width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;

	return tb_port_width_supported(port, width);
}
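/*
 * Illustrative mapping: when the path runs downstream (towards the
 * device), a downstream-facing port transmits into the link and must
 * support TB_LINK_WIDTH_ASYM_TX, while the upstream port on the other
 * end of the link receives and must support TB_LINK_WIDTH_ASYM_RX. For
 * an upstream path the two widths are simply swapped.
 */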
/**
 * tb_maximum_bandwidth() - Maximum bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the total bandwidth is calculated for
 * @max_up: Maximum upstream bandwidth (Mb/s)
 * @max_down: Maximum downstream bandwidth (Mb/s)
 * @include_asym: Include bandwidth if the link is switched from
 *		  symmetric to asymmetric
 *
 * Returns maximum possible bandwidth in @max_up and @max_down over a
 * single link at @port. If @include_asym is set then includes the
 * additional bandwidth if the links are transitioned into asymmetric
 * in the direction from @src_port to @dst_port.
 */
static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
				struct tb_port *dst_port, struct tb_port *port,
				int *max_up, int *max_down, bool include_asym)
{
	bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
	int link_speed, link_width, up_bw, down_bw;

	/*
	 * Can include asymmetric, only if it is actually supported by
	 * the link.
	 */
	if (!tb_asym_supported(src_port, dst_port, port))
		include_asym = false;

	if (tb_is_upstream_port(port)) {
		link_speed = port->sw->link_speed;
		/*
		 * sw->link_width is from upstream perspective so we use
		 * the opposite for downstream of the host router.
		 */
		if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
			up_bw = link_speed * 3 * 1000;
			down_bw = link_speed * 1 * 1000;
		} else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
			up_bw = link_speed * 1 * 1000;
			down_bw = link_speed * 3 * 1000;
		} else if (include_asym) {
			/*
			 * The link is symmetric at the moment but we
			 * can switch it to asymmetric as needed. Report
			 * this bandwidth as available (even though it
			 * is not yet enabled).
			 */
			if (downstream) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			}
		} else {
			up_bw = link_speed * port->sw->link_width * 1000;
			down_bw = up_bw;
		}
	} else {
		link_speed = tb_port_get_link_speed(port);
		if (link_speed < 0)
			return link_speed;

		link_width = tb_port_get_link_width(port);
		if (link_width < 0)
			return link_width;

		if (link_width == TB_LINK_WIDTH_ASYM_TX) {
			up_bw = link_speed * 1 * 1000;
			down_bw = link_speed * 3 * 1000;
		} else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
			up_bw = link_speed * 3 * 1000;
			down_bw = link_speed * 1 * 1000;
		} else if (include_asym) {
			/*
			 * The link is symmetric at the moment but we
			 * can switch it to asymmetric as needed. Report
			 * this bandwidth as available (even though it
			 * is not yet enabled).
			 */
			if (downstream) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			}
		} else {
			up_bw = link_speed * link_width * 1000;
			down_bw = up_bw;
		}
	}

	/* Leave 10% guard band */
	*max_up = up_bw - up_bw / 10;
	*max_down = down_bw - down_bw / 10;

	tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
	return 0;
}
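/*
 * Worked example (illustrative): a symmetric Gen 4 link reports
 * link_speed = 40 and link_width = 2, so up_bw = down_bw = 40 * 2 *
 * 1000 = 80000 Mb/s, or 72000 Mb/s per direction after the 10% guard
 * band. The same link in TB_LINK_WIDTH_ASYM_TX gives 40 * 3 * 1000 =
 * 120000 Mb/s down and 40 * 1 * 1000 = 40000 Mb/s up (108000 and
 * 36000 Mb/s after the guard band, the latter matching TB_ASYM_MIN).
 */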
/**
 * tb_available_bandwidth() - Available bandwidth for tunneling
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @available_up: Available bandwidth upstream (Mb/s)
 * @available_down: Available bandwidth downstream (Mb/s)
 * @include_asym: Include bandwidth if the link is switched from
 *		  symmetric to asymmetric
 *
 * Calculates maximum available bandwidth for protocol tunneling between
 * @src_port and @dst_port at the moment. This is the minimum of maximum
 * link bandwidth across all links reduced by currently consumed
 * bandwidth on that link.
 *
 * If @include_asym is true then includes also bandwidth that can be
 * added when the links are transitioned into asymmetric (but does not
 * transition the links).
 */
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down, bool include_asym)
{
	struct tb_port *port;
	int ret;

	/* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
	*available_up = *available_down = 120000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int max_up, max_down, consumed_up, consumed_down;

		if (!tb_port_is_null(port))
			continue;

		ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
					   &max_up, &max_down, include_asym);
		if (ret)
			return ret;

		ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
						      port, &consumed_up,
						      &consumed_down);
		if (ret)
			return ret;
		max_up -= consumed_up;
		max_down -= consumed_down;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
					       &consumed_up, &consumed_down);
		if (ret)
			return ret;
		max_up -= consumed_up;
		max_down -= consumed_down;

		if (max_up < *available_up)
			*available_up = max_up;
		if (max_down < *available_down)
			*available_down = max_down;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}
static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down, false);
	if (ret) {
		tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
		return;
	}

	tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
		      available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
				     false);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * tb_configure_asym() - Transition links to asymmetric if needed
 * @tb: Domain structure
 * @src_port: Source adapter to start the transition
 * @dst_port: Destination adapter
 * @requested_up: Additional bandwidth (Mb/s) required upstream
 * @requested_down: Additional bandwidth (Mb/s) required downstream
 *
 * Transition links between @src_port and @dst_port into asymmetric, with
 * three lanes in the direction from @src_port towards @dst_port and one lane
 * in the opposite direction, if the bandwidth requirements
 * (requested + currently consumed) on that link exceed @asym_threshold.
 *
 * Must be called with available >= requested over all links.
 */
static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
			     struct tb_port *dst_port, int requested_up,
			     int requested_down)
{
	bool clx = false, clx_disabled = false, downstream;
	struct tb_switch *sw;
	struct tb_port *up;
	int ret = 0;

	if (!asym_threshold)
		return 0;

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	/* Pick up router deepest in the hierarchy */
	if (downstream)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
		struct tb_port *down = tb_switch_downstream_port(up->sw);
		enum tb_link_width width_up, width_down;
		int consumed_up, consumed_down;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
					       &consumed_up, &consumed_down);
		if (ret)
			break;

		if (downstream) {
			/*
			 * Downstream so make sure upstream is within the 36G
			 * (40G - guard band 10%), and the requested is above
			 * what the threshold is.
			 */
			if (consumed_up + requested_up >= TB_ASYM_MIN) {
				ret = -ENOBUFS;
				break;
			}
			/* Does consumed + requested exceed the threshold */
			if (consumed_down + requested_down < asym_threshold)
				continue;

			width_up = TB_LINK_WIDTH_ASYM_RX;
			width_down = TB_LINK_WIDTH_ASYM_TX;
		} else {
			/* Upstream, the opposite of above */
			if (consumed_down + requested_down >= TB_ASYM_MIN) {
				ret = -ENOBUFS;
				break;
			}

			if (consumed_up + requested_up < asym_threshold)
				continue;

			width_up = TB_LINK_WIDTH_ASYM_TX;
			width_down = TB_LINK_WIDTH_ASYM_RX;
		}

		if (up->sw->link_width == width_up)
			continue;

		if (!tb_port_width_supported(up, width_up) ||
		    !tb_port_width_supported(down, width_down))
			continue;

		/*
		 * Disable CL states before doing any transitions. We
		 * delayed it until now that we know there is a real
		 * transition taking place.
		 */
		if (!clx_disabled) {
			clx = tb_disable_clx(sw);
			clx_disabled = true;
		}

		tb_sw_dbg(up->sw, "configuring asymmetric link\n");

		/*
		 * Here requested + consumed > threshold so we need to
		 * transition the link into asymmetric now.
		 */
		ret = tb_switch_set_link_width(up->sw, width_up);
		if (ret) {
			tb_sw_warn(up->sw, "failed to set link width\n");
			break;
		}
	}

	/* Re-enable CL states if they were previously enabled */
	if (clx)
		tb_enable_clx(sw);

	return ret;
}
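/*
 * Numeric example (illustrative, default threshold): with asym_threshold
 * at 45000 Mb/s, a downstream link already carrying 30000 Mb/s of DP
 * traffic that gets a request for another 20000 Mb/s yields 50000 >=
 * 45000, so the link is transitioned to asymmetric, provided
 * consumed_up + requested_up stays below TB_ASYM_MIN (36000 Mb/s).
 */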
/**
 * tb_configure_sym() - Transition links to symmetric if possible
 * @tb: Domain structure
 * @src_port: Source adapter to start the transition
 * @dst_port: Destination adapter
 * @keep_asym: Keep asymmetric link if preferred
 *
 * Goes over each link from @src_port to @dst_port and tries to
 * transition the link to symmetric if the currently consumed bandwidth
 * allows. The router's asymmetric link preference is honored only when
 * @keep_asym is %true.
 */
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
			    struct tb_port *dst_port, bool keep_asym)
{
	bool clx = false, clx_disabled = false, downstream;
	struct tb_switch *sw;
	struct tb_port *up;
	int ret = 0;

	if (!asym_threshold)
		return 0;

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	/* Pick up router deepest in the hierarchy */
	if (downstream)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
		int consumed_up, consumed_down;

		/* Already symmetric */
		if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
			continue;
		/* Unplugged, no need to switch */
		if (up->sw->is_unplugged)
			continue;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
					       &consumed_up, &consumed_down);
		if (ret)
			break;

		if (downstream) {
			/*
			 * Downstream so we want consumed_down to be below
			 * the threshold. Upstream traffic should be less
			 * than 36G (40G - 10% guard band) as the link was
			 * configured asymmetric already.
			 */
			if (consumed_down >= asym_threshold)
				continue;
		} else {
			if (consumed_up >= asym_threshold)
				continue;
		}

		if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
			continue;

		/*
		 * Here consumed < threshold so we can transition the
		 * link to symmetric.
		 *
		 * However, if the router prefers asymmetric link we
		 * honor that (unless @keep_asym is %false).
		 */
		if (keep_asym &&
		    up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
			tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
			continue;
		}

		/* Disable CL states before doing any transitions */
		if (!clx_disabled) {
			clx = tb_disable_clx(sw);
			clx_disabled = true;
		}

		tb_sw_dbg(up->sw, "configuring symmetric link\n");

		ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
		if (ret) {
			tb_sw_warn(up->sw, "failed to set link width\n");
			break;
		}
	}

	/* Re-enable CL states if they were previously enabled */
	if (clx)
		tb_enable_clx(sw);

	return ret;
}
static void tb_configure_link(struct tb_port *down, struct tb_port *up,
			      struct tb_switch *sw)
{
	struct tb *tb = sw->tb;

	/* Link the routers using both links if available */
	down->remote = up;
	up->remote = down;
	if (down->dual_link_port && up->dual_link_port) {
		down->dual_link_port->remote = up->dual_link_port;
		up->dual_link_port->remote = down->dual_link_port;
	}

	/*
	 * Enable lane bonding if the link is currently two single lane
	 * links.
	 */
	if (sw->link_width < TB_LINK_WIDTH_DUAL)
		tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);

	/*
	 * If a device router that comes up with a symmetric link is
	 * connected deeper in the hierarchy, we transition the links
	 * above it into symmetric if bandwidth allows.
	 */
	if (tb_switch_depth(sw) > 1 &&
	    tb_port_get_link_generation(up) >= 4 &&
	    up->sw->link_width == TB_LINK_WIDTH_DUAL) {
		struct tb_port *host_port;

		host_port = tb_port_at(tb_route(sw), tb->root_switch);
		tb_configure_sym(tb, host_port, up, false);
	}

	/* Set the link configured */
	tb_switch_configure_link(sw);
}
static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}
/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm;
	}

	if (tb_switch_configure(sw)) {
		tb_sw_warn(sw, "failed to initialize switch, aborting\n");
		tb_switch_put(sw);
		goto out_rpm;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment runtime PM is only supported for Thunderbolt 2
	 * and beyond (devices with an LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm;
	}

	upstream_port = tb_upstream_port(sw);
	tb_configure_link(port, upstream_port, sw);

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Configuration valid needs to be set after the TMU has been
	 * enabled for the upstream port of the router so we do it here.
	 */
	tb_switch_configuration_valid(sw);

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}
static void
tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
{
	struct tb_tunnel *first_tunnel;
	struct tb *tb = group->tb;
	struct tb_port *in;
	int ret;

	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
	       group->index);

	first_tunnel = NULL;
	list_for_each_entry(in, &group->ports, group_list) {
		int estimated_bw, estimated_up, estimated_down;
		struct tb_tunnel *tunnel;
		struct tb_port *out;

		if (!usb4_dp_port_bandwidth_mode_enabled(in))
			continue;

		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
		if (WARN_ON(!tunnel))
			break;

		if (!first_tunnel) {
			/*
			 * Since USB3 bandwidth is shared by all DP
			 * tunnels under the host router USB4 port, even
			 * if they do not begin from the host router, we
			 * can release USB3 bandwidth just once and not
			 * for each tunnel separately.
			 */
			first_tunnel = tunnel;
			ret = tb_release_unused_usb3_bandwidth(tb,
				first_tunnel->src_port, first_tunnel->dst_port);
			if (ret) {
				tb_tunnel_warn(tunnel,
					"failed to release unused bandwidth\n");
				break;
			}
		}

		out = tunnel->dst_port;
		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
					     &estimated_down, true);
		if (ret) {
			tb_tunnel_warn(tunnel,
				"failed to re-calculate estimated bandwidth\n");
			break;
		}

		/*
		 * Estimated bandwidth includes:
		 *  - already allocated bandwidth for the DP tunnel
		 *  - available bandwidth along the path
		 *  - bandwidth allocated for USB 3.x but not used
		 */
		if (tb_tunnel_direction_downstream(tunnel))
			estimated_bw = estimated_down;
		else
			estimated_bw = estimated_up;

		/*
		 * If there is reserved bandwidth for the group that is
		 * not yet released we report that too.
		 */
		tb_tunnel_dbg(tunnel,
			      "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
			      estimated_bw, group->reserved,
			      estimated_bw + group->reserved);

		if (usb4_dp_port_set_estimated_bandwidth(in,
				estimated_bw + group->reserved))
			tb_tunnel_warn(tunnel,
				       "failed to update estimated bandwidth\n");
	}

	if (first_tunnel)
		tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
					  first_tunnel->dst_port);

	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
}
static void tb_recalc_estimated_bandwidth(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int i;

	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (!list_empty(&group->ports))
			tb_recalc_estimated_bandwidth_for_group(group);
	}

	tb_dbg(tb, "bandwidth re-calculation done\n");
}
static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
{
	if (group->reserved) {
		tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
		       group->reserved);
		group->reserved = 0;
		return true;
	}

	return false;
}
static void __configure_group_sym(struct tb_bandwidth_group *group)
{
	struct tb_tunnel *tunnel;
	struct tb_port *in;

	if (list_empty(&group->ports))
		return;

	/*
	 * All the tunnels in the group go through the same USB4 links
	 * so we find the first one here and pass the IN and OUT
	 * adapters to tb_configure_sym() which now transitions the
	 * links back to symmetric if bandwidth requirement < asym_threshold.
	 *
	 * We do this here to avoid unnecessary transitions (for example
	 * if the graphics released bandwidth for another tunnel in the
	 * same group).
	 */
	in = list_first_entry(&group->ports, struct tb_port, group_list);
	tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
	if (tunnel)
		tb_configure_sym(group->tb, in, tunnel->dst_port, true);
}
static void tb_bandwidth_group_release_work(struct work_struct *work)
{
	struct tb_bandwidth_group *group =
		container_of(work, typeof(*group), release_work.work);
	struct tb *tb = group->tb;

	mutex_lock(&tb->lock);
	if (__release_group_bandwidth(group))
		tb_recalc_estimated_bandwidth(tb);
	__configure_group_sym(group);
	mutex_unlock(&tb->lock);
}
static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		group->tb = tcm_to_tb(tcm);
		group->index = i + 1;
		INIT_LIST_HEAD(&group->ports);
		INIT_DELAYED_WORK(&group->release_work,
				  tb_bandwidth_group_release_work);
	}
}
static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
					   struct tb_port *in)
{
	if (!group || WARN_ON(in->group))
		return;

	in->group = group;
	list_add_tail(&in->group_list, &group->ports);

	tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}
static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (list_empty(&group->ports))
			return group;
	}

	return NULL;
}
static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
			  struct tb_port *out)
{
	struct tb_bandwidth_group *group;
	struct tb_tunnel *tunnel;

	/*
	 * Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always setup tunnels the same way we
	 * can just check for the routers at both ends of the tunnels
	 * and if they are the same we have a match.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			if (group) {
				tb_bandwidth_group_attach_port(group, in);
				return group;
			}
		}
	}

	/* Pick up next available group then */
	group = tb_find_free_bandwidth_group(tcm);
	if (group)
		tb_bandwidth_group_attach_port(group, in);
	else
		tb_port_warn(in, "no available bandwidth groups\n");

	return group;
}
static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
					struct tb_port *out)
{
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		int index, i;

		index = usb4_dp_port_group_id(in);
		for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
			if (tcm->groups[i].index == index) {
				tb_bandwidth_group_attach_port(&tcm->groups[i], in);
				return;
			}
		}
	}

	tb_attach_bandwidth_group(tcm, in, out);
}
static void tb_detach_bandwidth_group(struct tb_port *in)
{
	struct tb_bandwidth_group *group = in->group;

	if (group) {
		in->group = NULL;
		list_del_init(&in->group_list);

		tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);

		/* No more tunnels so release the reserved bandwidth if any */
		if (list_empty(&group->ports)) {
			cancel_delayed_work(&group->release_work);
			__release_group_bandwidth(group);
		}
	}
}
static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			struct tb_port *in = tunnel->src_port;
			struct tb_port *out = tunnel->dst_port;

			/* Keep the domain from powering down */
			pm_runtime_get_sync(&in->sw->dev);
			pm_runtime_get_sync(&out->sw->dev);

			tb_discover_bandwidth_group(tcm, in, out);
		}
	}
}
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		tb_detach_bandwidth_group(src_port);
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/*
		 * If bandwidth on a link is < asym_threshold
		 * transition the link to symmetric.
		 */
		tb_configure_sym(tb, src_port, dst_port, true);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}
/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}
/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_set_link_width(port->remote->sw,
						 TB_LINK_WIDTH_SINGLE);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "DP OUT in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}
static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
			     struct tb_port *out)
{
	int available_up, available_down, ret, link_nr;
	struct tb_cm *tcm = tb_priv(tb);
	int consumed_up, consumed_down;
	struct tb_tunnel *tunnel;

	/*
	 * This is only applicable to links that are not bonded (so
	 * when Thunderbolt 1 hardware is involved somewhere in the
	 * topology). For these try to share the DP bandwidth between
	 * the two lanes.
	 */
	link_nr = 1;
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel)) {
			link_nr = 0;
			break;
		}
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm;
	}

	if (!tb_attach_bandwidth_group(tcm, in, out))
		goto err_dealloc_dp;

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_detach_group;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
				     true);
	if (ret)
		goto err_reclaim_usb;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
				    available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim_usb;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	/* If reading the tunnel's consumed bandwidth fails, tear it down */
	ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
	if (ret)
		goto err_deactivate;

	list_add_tail(&tunnel->list, &tcm->tunnel_list);

	tb_reclaim_usb3_bandwidth(tb, in, out);
	/*
	 * Transition the links to asymmetric if the consumption exceeds
	 * the threshold.
	 */
	tb_configure_asym(tb, in, out, consumed_up, consumed_down);

	/* Update the domain with the new bandwidth estimation */
	tb_recalc_estimated_bandwidth(tb);

	/*
	 * When a DP tunnel exists, change the TMU mode of the host
	 * router's first-depth children to HiFi for CL0s to work.
	 */
	tb_increase_tmu_accuracy(tunnel);
	return true;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);
err_reclaim_usb:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_detach_group:
	tb_detach_bandwidth_group(in);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);

	return false;
}
static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "DP IN in use\n");
			continue;
		}

		in = port;
		tb_port_dbg(in, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out)
			tb_tunnel_one_dp(tb, in, out);
		else
			tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
	}

	if (!in)
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
}
static void tb_enter_redrive(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;

	if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
		return;

	/*
	 * If we get hot-unplug for the DP IN port of the host router
	 * and the DP resource is not available anymore it means there
	 * is a monitor connected directly to the Type-C port and we are
	 * in "redrive" mode. For this to work we cannot enter RTD3 so
	 * we bump up the runtime PM reference count here.
	 */
	if (!tb_port_is_dpin(port))
		return;
	if (tb_route(sw))
		return;
	if (!tb_switch_query_dp_resource(sw, port)) {
		port->redrive = true;
		pm_runtime_get(&sw->dev);
		tb_port_dbg(port, "enter redrive mode, keeping powered\n");
	}
}

static void tb_exit_redrive(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;

	if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
		return;

	if (!tb_port_is_dpin(port))
		return;
	if (tb_route(sw))
		return;
	if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
		port->redrive = false;
		pm_runtime_put(&sw->dev);
		tb_port_dbg(port, "exit redrive mode\n");
	}
}
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	if (tunnel)
		tb_deactivate_and_free_tunnel(tunnel);
	else
		tb_enter_redrive(port);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_recalc_estimated_bandwidth(tb);
	tb_tunnel_dp(tb);
}
static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available after hotplug\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
	tb_exit_redrive(port);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}
static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}
static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_switch_xhci_disconnect(sw);

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_pcie_down(tb_switch_parent(sw), port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/*
	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
	 * here.
	 */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");

	if (tb_switch_xhci_connect(sw))
		tb_sw_warn(sw, "failed to connect xHCI\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;
	int ret;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);

	/*
	 * When tunneling DMA paths the link should not enter CL states
	 * so disable them now.
	 */
	tb_disable_clx(sw);

	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_clx;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_clx:
	tb_enable_clx(sw);
	mutex_unlock(&tb->lock);

	return ret;
}
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					  int transmit_path, int transmit_ring,
					  int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel, *n;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
			continue;

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	/*
	 * Try to re-enable CL states now, it is OK if this fails
	 * because we may still have another DMA tunnel active through
	 * the same host router USB4 downstream port.
	 */
	tb_enable_clx(sw);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
	}
	return 0;
}
/* hotplug handling */

/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_set_link_width(port->remote->sw,
						 TB_LINK_WIDTH_SINGLE);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_recalc_estimated_bandwidth(tb);
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else if (!port->port) {
			tb_sw_dbg(sw, "xHCI disconnect request\n");
			tb_switch_xhci_disconnect(sw);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else if (!port->port && sw->authorized) {
		tb_sw_dbg(sw, "xHCI connect request\n");
		tb_switch_xhci_connect(sw);
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}
static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
				 int *requested_down)
{
	int allocated_up, allocated_down, available_up, available_down, ret;
	int requested_up_corrected, requested_down_corrected, granularity;
	int max_up, max_down, max_up_rounded, max_down_rounded;
	struct tb_bandwidth_group *group;
	struct tb *tb = tunnel->tb;
	struct tb_port *in, *out;
	bool downstream;

	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
	if (ret)
		return ret;

	in = tunnel->src_port;
	out = tunnel->dst_port;

	tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
		      allocated_up, allocated_down);

	/*
	 * If we get a rounded-up request from the graphics side, say HBR2 x 4
	 * that is 17500 instead of 17280 (this is because of the
	 * granularity), we allow it too. Here the graphics has already
	 * negotiated with the DPRX the maximum possible rates (which is
	 * 17280 in this case).
	 *
	 * Since the link cannot go higher than 17280 we use that in our
	 * calculations but the DP IN adapter Allocated BW write must be
	 * the same value (17500) otherwise the adapter will mark it as
	 * failed for graphics.
	 */
	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
	if (ret)
		return ret;

	ret = usb4_dp_port_granularity(in);
	if (ret < 0)
		return ret;
	granularity = ret;

	max_up_rounded = roundup(max_up, granularity);
	max_down_rounded = roundup(max_down, granularity);
2419 * This will "fix" the request down to the maximum supported
2420 * rate * lanes if it is at the maximum rounded up level.
2422 requested_up_corrected = *requested_up;
2423 if (requested_up_corrected == max_up_rounded)
2424 requested_up_corrected = max_up;
2425 else if (requested_up_corrected < 0)
2426 requested_up_corrected = 0;
2427 requested_down_corrected = *requested_down;
2428 if (requested_down_corrected == max_down_rounded)
2429 requested_down_corrected = max_down;
2430 else if (requested_down_corrected < 0)
2431 requested_down_corrected = 0;
2433 tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
2434 requested_up_corrected, requested_down_corrected);
2436 if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
2437 (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
2438 tb_tunnel_dbg(tunnel,
2439 "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
2440 requested_up_corrected, requested_down_corrected,
2441 max_up_rounded, max_down_rounded);
2446 downstream = tb_tunnel_direction_downstream(tunnel);
2449 if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
2450 (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
2451 if (tunnel->bw_mode) {
2454 * If requested bandwidth is less or equal than
2455 * what is currently allocated to that tunnel we
2456 * simply change the reservation of the tunnel
2457 * and add the released bandwidth for the group
2458 * for the next 10s. Then we release it for
2462 reserved = allocated_down - *requested_down;
2464 reserved = allocated_up - *requested_up;
2467 group->reserved += reserved;
2468 tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
2469 group->index, reserved, group->reserved);
2472 * If it was not already pending,
2473 * schedule release now. If it is then
2474 * postpone it for the next 10s (unless
2475 * it is already running in which case
2476 * the 10s already expired and we should
2477 * give the reserved back to others).
2479 mod_delayed_work(system_wq, &group->release_work,
2480 msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
2484 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2489 * More bandwidth is requested. Release all the potential
2490 * bandwidth from USB3 first.
2492 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
2497 * Then go over all tunnels that cross the same USB4 ports (they
2498 * are also in the same group but we use the same function here
2499 * that we use with the normal bandwidth allocation).
2501 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
2506 tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
2507 available_up, available_down, group->reserved);
2509 if ((*requested_up >= 0 &&
2510 available_up + group->reserved >= requested_up_corrected) ||
2511 (*requested_down >= 0 &&
2512 available_down + group->reserved >= requested_down_corrected)) {
2516 * If bandwidth on a link is >= asym_threshold
2517 * transition the link to asymmetric.
2519 ret = tb_configure_asym(tb, in, out, *requested_up,
2522 tb_configure_sym(tb, in, out, true);
2526 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2529 tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
2530 tb_configure_sym(tb, in, out, true);
2534 if (*requested_down > available_down)
2535 released = *requested_down - available_down;
2537 if (*requested_up > available_up)
2538 released = *requested_up - available_up;
2541 group->reserved -= released;
2542 tb_dbg(tb, "group %d released %d total %d Mb/s\n",
2543 group->index, released, group->reserved);
2550 tb_reclaim_usb3_bandwidth(tb, in, out);
2552 if (ret && ret != -ENODEV) {
2554 * Write back the same allocated (so no change), this
2555 * makes the DPTX request fail on graphics side.
2557 tb_tunnel_dbg(tunnel,
2558 "failing the request by rewriting allocated %d/%d Mb/s\n",
2559 allocated_up, allocated_down);
2560 tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
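/*
 * Work item servicing a single DP_BW notification: reads the requested
 * bandwidth from the DP IN adapter and delegates the actual allocation
 * to tb_alloc_dp_bandwidth().
 */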
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	int requested_bw, requested_up, requested_down, ret;
	struct tb_tunnel *tunnel;
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *in;

	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto unlock;

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb, "bandwidth request from non-existent router %llx\n",
			ev->route);
		goto unlock;
	}

	in = &sw->ports[ev->port];
	if (!tb_port_is_dpin(in)) {
		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
		goto put_sw;
	}

	tb_port_dbg(in, "handling bandwidth allocation request\n");

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
	if (!tunnel) {
		tb_port_warn(in, "failed to find tunnel\n");
		goto put_sw;
	}

	if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
		if (tunnel->bw_mode) {
			/*
			 * Reset the tunnel back to use the legacy
			 * allocation.
			 */
			tunnel->bw_mode = false;
			tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
		} else {
			tb_port_warn(in, "bandwidth allocation mode not enabled\n");
		}
		goto put_sw;
	}

	ret = usb4_dp_port_requested_bandwidth(in);
	if (ret < 0) {
		if (ret == -ENODATA) {
			/*
			 * There is no request active so this means the
			 * BW allocation mode was enabled from graphics
			 * side. At this point we know that the graphics
			 * driver has read the DPRX capabilities so we
			 * can offer a better bandwidth estimation.
			 */
			tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
			tb_recalc_estimated_bandwidth(tb);
		} else {
			tb_port_warn(in, "failed to read requested bandwidth\n");
		}
		goto put_sw;
	}
	requested_bw = ret;

	tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);

	if (tb_tunnel_direction_downstream(tunnel)) {
		requested_up = -1;
		requested_down = requested_bw;
	} else {
		requested_up = requested_bw;
		requested_down = -1;
	}

	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
	if (ret) {
		if (ret == -ENOBUFS)
			tb_tunnel_warn(tunnel,
				       "not enough bandwidth available\n");
		else
			tb_tunnel_warn(tunnel,
				       "failed to change bandwidth allocation\n");
	} else {
		tb_tunnel_dbg(tunnel,
			      "bandwidth allocation changed to %d/%d Mb/s\n",
			      requested_up, requested_down);
		/* Update other clients about the allocation change */
		tb_recalc_estimated_bandwidth(tb);
	}

put_sw:
	tb_switch_put(sw);
unlock:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(ev);
}
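/*
 * Queue tb_handle_dp_bandwidth_request() on the domain workqueue. The
 * notification is silently dropped if the event cannot be allocated.
 */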
static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
	queue_work(tb->wq, &ev->work);
}
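/*
 * Acknowledge the error notifications the software connection manager
 * handles and turn DP bandwidth requests into queued work items.
 */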
static void tb_handle_notification(struct tb *tb, u64 route,
				   const struct cfg_error_pkg *error)
{
	switch (error->error) {
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		break;

	case TB_CFG_ERROR_DP_BW:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		tb_queue_dp_bandwidth_request(tb, route, error->port);
		break;

	default:
		/* Ignore for now */
		break;
	}
}
/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route = tb_cfg_get_route(&pkg->header);

	switch (type) {
	case TB_CFG_PKG_ERROR:
		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
		return;
	case TB_CFG_PKG_EVENT:
		break;
	default:
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}
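/*
 * Tear down the tunnel list (only DMA tunnels are deactivated, the rest
 * are just freed) and remove the root switch when the domain is stopped.
 */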
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
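/* Runs through the ->deinit callback when the domain is being removed */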
static void tb_deinit(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int i;

	/* Cancel all the release bandwidth workers */
	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
		cancel_delayed_work_sync(&tcm->groups[i].release_work);
}
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}
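/*
 * Set up the root switch, discover or reset the pre-existing topology
 * depending on @reset, and start accepting hotplug events.
 */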
static int tb_start(struct tb *tb, bool reset)
{
	struct tb_cm *tcm = tb_priv(tb);
	bool discover = true;
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 *
	 * However, USB4 routers support NVM firmware upgrade if they
	 * implement the necessary router operations.
	 */
	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/*
	 * To support the highest CLx state, we set the host router's TMU
	 * to Normal mode.
	 */
	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);

	/*
	 * Boot firmware might have created tunnels of its own. Since we
	 * cannot be sure they are usable for us, tear them down and
	 * reset the ports to handle it as new hotplug for USB4 v1
	 * routers (for USB4 v2 and beyond we already do host reset).
	 */
	if (reset && tb_switch_is_usb4(tb->root_switch)) {
		discover = false;
		if (usb4_switch_version(tb->root_switch) == 1)
			tb_switch_reset(tb->root_switch);
	}

	if (discover) {
		/* Full scan to discover devices added before the driver was loaded. */
		tb_scan_switch(tb->root_switch);
		/* Find out tunnels created by the boot firmware */
		tb_discover_tunnels(tb);
		/* Add DP resources from the DP tunnels created by the boot firmware */
		tb_discover_dp_resources(tb);
	}

	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
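/* Restore CL states, TMU and link configuration for the whole subtree */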
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to re-enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_configuration_valid(sw);

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_set_link_width(port->remote->sw,
						 port->remote->sw->link_width);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port, port->xdomain);
		}
	}
}
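/* Bring the topology back up and re-establish our tunnels after sleep */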
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/*
	 * For non-USB4 hosts (Apple systems) remove any PCIe devices
	 * the firmware might have set up.
	 */
	if (!tb_switch_is_usb4(tb->root_switch))
		tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}
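/* Returns the number of unplugged XDomains removed from the subtree */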
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}
static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}
static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of an unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}
static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}
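/*
 * Scheduled from tb_runtime_resume() to remove any routers and XDomains
 * that were unplugged while the domain was runtime suspended.
 */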
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}
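/* Operations of the software connection manager */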
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.deinit = tb_deinit,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static bool tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;
	bool ret;

	if (!x86_apple_machine)
		return false;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return false;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return false;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return false;

	/*
	 * For each hotplug downstream port, add a device link back to
	 * the NHI so that PCIe tunnels can be re-established after
	 * sleep.
	 */
	ret = false;
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
			ret = true;
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}

	return ret;
}
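/**
 * tb_probe() - Set up the software connection manager for @nhi
 * @nhi: NHI to probe
 *
 * Allocates the domain, selects the security level based on ACPI and
 * initializes the connection manager private data. Returns the new
 * domain or %NULL if allocation failed.
 */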
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
	tb_init_bandwidth_groups(tcm);

	tb_dbg(tb, "using software connection manager\n");

	/*
	 * Device links are needed to make sure we establish tunnels
	 * before the PCIe/USB stack is resumed so complain here if we
	 * find them missing.
	 */
	if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
		tb_warn(tb, "device links to tunneled native ports are missing!\n");

	return tb;
}