// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#define TB_TIMEOUT	100	/* ms */

/*
 * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
 * direction. This is 40G - 10% guard band bandwidth.
 */
#define TB_ASYM_MIN	(40000 * 90 / 100)

/*
 * Threshold bandwidth (in Mb/s) that is used to switch the links to
 * asymmetric and back. This is selected as 45G which means when the
 * request is higher than this, we switch the link to asymmetric, and
 * when it is less than this we switch it back. The 45G is selected so
 * that we still have 27G (of the total 72G) for bulk PCIe traffic when
 * switching back to symmetric.
 */
#define TB_ASYM_THRESHOLD	45000
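/*
 * Worked example of the two limits above (numbers from the comments,
 * arrangement illustrative): a symmetric Gen 4 link provides 2 x 40G =
 * 80G per direction, i.e. 80000 * 90 / 100 = 72000 Mb/s after the 10%
 * guard band. With the default 45000 Mb/s threshold, switching back to
 * symmetric still leaves 72000 - 45000 = 27000 Mb/s for bulk PCIe
 * traffic, and TB_ASYM_MIN evaluates to 40000 * 90 / 100 = 36000 Mb/s.
 */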
#define MAX_GROUPS	7	/* max Group_ID is 7 */

static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
module_param_named(asym_threshold, asym_threshold, uint, 0444);
MODULE_PARM_DESC(asym_threshold,
	"threshold (Mb/s) when to switch Gen 4 link symmetry. 0 disables. (default: "
	__MODULE_STRING(TB_ASYM_THRESHOLD) ")");
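/*
 * The threshold is tunable at module load time, for example
 * (illustrative invocation):
 *
 *   # modprobe thunderbolt asym_threshold=50000
 *
 * Passing 0 disables the symmetric/asymmetric transitions entirely.
 * The 0444 permissions make the parameter read-only once loaded.
 */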
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 * @groups: Bandwidth groups used in this domain.
 */
	struct list_head tunnel_list;
	struct list_head dp_resources;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];

static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
	return ((void *)tcm - sizeof(struct tb));
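/*
 * This works because the connection manager private data is allocated
 * immediately after struct tb (tb_priv() hands out that trailing
 * area), so subtracting sizeof(struct tb) from the private pointer
 * recovers the domain structure. tcm_to_tb() is thus the inverse of
 * tb_priv().
 */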
struct tb_hotplug_event {
	struct work_struct work;

static void tb_init_bandwidth_groups(struct tb_cm *tcm)
	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		group->tb = tcm_to_tb(tcm);
		INIT_LIST_HEAD(&group->ports);

static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
					   struct tb_port *in)
	if (!group || WARN_ON(in->group))
	list_add_tail(&in->group_list, &group->ports);
	tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);

static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (list_empty(&group->ports))

static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
			  struct tb_port *out)
	struct tb_bandwidth_group *group;
	struct tb_tunnel *tunnel;

	/*
	 * Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always setup tunnels the same way we
	 * can just check for the routers at both ends of the tunnels
	 * and if they are the same we have a match.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			tb_bandwidth_group_attach_port(group, in);

	/* Pick up next available group then */
	group = tb_find_free_bandwidth_group(tcm);
	tb_bandwidth_group_attach_port(group, in);
	tb_port_warn(in, "no available bandwidth groups\n");

static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
					struct tb_port *out)
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		index = usb4_dp_port_group_id(in);
		for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
			if (tcm->groups[i].index == index) {
				tb_bandwidth_group_attach_port(&tcm->groups[i], in);

	tb_attach_bandwidth_group(tcm, in, out);

static void tb_detach_bandwidth_group(struct tb_port *in)
	struct tb_bandwidth_group *group = in->group;

	list_del_init(&in->group_list);
	tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
		if (!tb_switch_query_dp_resource(sw, port))
		/*
		 * If DP IN on a device router exists, position it at the
		 * beginning of the DP resources list, so that it is used
		 * before DP IN of the host router. This way external GPU(s)
		 * will be prioritized when pairing DP IN to a DP OUT.
		 */
			list_add(&port->list, &tcm->dp_resources);
			list_add_tail(&port->list, &tcm->dp_resources);

		tb_port_dbg(port, "DP IN resource available\n");
static void tb_remove_dp_resources(struct tb_switch *sw)
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
	struct tb_cm *tcm = tb_priv(tb);

	list_for_each_entry(p, &tcm->dp_resources, list) {

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

static void tb_discover_dp_resources(struct tb *tb)
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);

/* Enables CL states up to host router */
static int tb_enable_clx(struct tb_switch *sw)
	struct tb_cm *tcm = tb_priv(sw->tb);
	unsigned int clx = TB_CL0S | TB_CL1;
	const struct tb_tunnel *tunnel;

	/*
	 * Currently only enable CLx for the first link. This is enough
	 * to allow the CPU to save energy at least on Intel hardware
	 * and makes it slightly simpler to implement. We may change
	 * this in the future to cover the whole topology if it turns
	 * out to be beneficial.
	 */
	while (sw && tb_switch_depth(sw) > 1)
		sw = tb_switch_parent(sw);

	if (tb_switch_depth(sw) != 1)

	/*
	 * If we are re-enabling then check if there is an active DMA
	 * tunnel and in that case bail out.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dma(tunnel)) {
			if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))

	/*
	 * Initially try with CL2. If that's not supported by the
	 * topology try with CL0s and CL1 and then give up.
	 */
	ret = tb_switch_clx_enable(sw, clx | TB_CL2);
	if (ret == -EOPNOTSUPP)
		ret = tb_switch_clx_enable(sw, clx);
	return ret == -EOPNOTSUPP ? 0 : ret;
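/*
 * Note the fallback ladder above: CL0s+CL1+CL2 is attempted first and
 * if the topology does not support CL2, the CL0s+CL1 pair is retried
 * on its own. -EOPNOTSUPP is folded into success because a topology
 * that cannot do CLx at all is not an error; the states simply remain
 * disabled.
 */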
/**
 * tb_disable_clx() - Disable CL states up to host router
 * @sw: Router to start
 *
 * Disables CL states from @sw up to the host router. Returns true if
 * any CL states were disabled. This can be used to figure out whether
 * the link was set up by us or by the boot firmware so we don't
 * accidentally enable them if they were not enabled during discovery.
 */
static bool tb_disable_clx(struct tb_switch *sw)
	bool disabled = false;

	ret = tb_switch_clx_disable(sw);
		tb_sw_warn(sw, "failed to disable CL states\n");

	sw = tb_switch_parent(sw);
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
	struct tb_switch *sw;

	sw = tb_to_switch(dev);

	if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
		enum tb_switch_tmu_mode mode;

		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
			mode = TB_SWITCH_TMU_MODE_HIFI_BI;

		ret = tb_switch_tmu_configure(sw, mode);

	return tb_switch_tmu_enable(sw);

static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
	struct tb_switch *sw;

	/*
	 * Once first DP tunnel is established we change the TMU
	 * accuracy of first depth child routers (and the host router)
	 * to the highest. This is needed for the DP tunneling to work
	 * but also allows CL0s.
	 *
	 * If both routers are v2 then we don't need to do anything as
	 * they are using enhanced TMU mode that allows all CLx.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
static int tb_enable_tmu(struct tb_switch *sw)
	/*
	 * If both routers at the end of the link are v2 we simply
	 * enable the enhanced uni-directional mode. That covers all
	 * the CL states. For v1 and before we need to use the normal
	 * rate to allow CL1 (when supported). Otherwise we keep the TMU
	 * running at the highest accuracy.
	 */
	ret = tb_switch_tmu_configure(sw,
			TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
	if (ret == -EOPNOTSUPP) {
		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_LOWRES);
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_HIFI_BI);

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))

	ret = tb_switch_tmu_disable(sw);

	ret = tb_switch_tmu_post_time(sw);

	return tb_switch_tmu_enable(sw);
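/*
 * Summary of the mode selection above (derived from this code, not a
 * normative table): both link partners v2 -> MEDRES_ENHANCED_UNI;
 * older routers with CL1 enabled -> LOWRES; otherwise -> HIFI_BI. The
 * TMU is only disabled and re-enabled when it is not already running
 * in the wanted mode.
 */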
static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);

		list_add_tail(&tunnel->list, list);

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);

static void tb_discover_tunnels(struct tb *tb)
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent = tb_switch_parent(parent);
		} else if (tb_tunnel_is_dp(tunnel)) {
			struct tb_port *in = tunnel->src_port;
			struct tb_port *out = tunnel->dst_port;

			/* Keep the domain from powering down */
			pm_runtime_get_sync(&in->sw->dev);
			pm_runtime_get_sync(&out->sw->dev);

			tb_discover_bandwidth_group(tcm, in, out);

static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);

static void tb_port_unconfigure_xdomain(struct tb_port *port)
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
		tb_lc_unconfigure_xdomain(port);

static void tb_scan_xdomain(struct tb_port *port)
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;

	if (!tb_is_xdomain_enabled())

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
	tb_port_at(route, sw)->xdomain = xd;
	tb_port_configure_xdomain(port, xd);

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
		if (port->config.type != type)
		if (tb_port_is_enabled(port))

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (tb_port_path_direction_downstream(src_port, dst_port))

	/* Can't be the host router */
	if (sw == tb->root_switch)

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
/**
 * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the consumed bandwidth is calculated for
 * @consumed_up: Consumed upstream bandwidth (Mb/s)
 * @consumed_down: Consumed downstream bandwidth (Mb/s)
 *
 * Calculates consumed USB3 and PCIe bandwidth at @port between path
 * from @src_port to @dst_port. Does not take the tunnel starting from
 * @src_port and ending at @dst_port into account.
 */
static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
					   struct tb_port *src_port,
					   struct tb_port *dst_port,
					   struct tb_port *port,
					   int *consumed_up, int *consumed_down)
	int pci_consumed_up, pci_consumed_down;
	struct tb_tunnel *tunnel;

	*consumed_up = *consumed_down = 0;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && tunnel->src_port != src_port &&
	    tunnel->dst_port != dst_port) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
						   consumed_down);

	/*
	 * If there is anything reserved for PCIe bulk traffic take it
	 * into account here too.
	 */
	if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
		*consumed_up += pci_consumed_up;
		*consumed_down += pci_consumed_down;
/**
 * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the consumed bandwidth is calculated for
 * @consumed_up: Consumed upstream bandwidth (Mb/s)
 * @consumed_down: Consumed downstream bandwidth (Mb/s)
 *
 * Calculates consumed DP bandwidth at @port between path from @src_port
 * to @dst_port. Does not take the tunnel starting from @src_port and
 * ending at @dst_port into account.
 */
static int tb_consumed_dp_bandwidth(struct tb *tb,
				    struct tb_port *src_port,
				    struct tb_port *dst_port,
				    struct tb_port *port,
				    int *consumed_up, int *consumed_down)
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	*consumed_up = *consumed_down = 0;

	/*
	 * Find all DP tunnels that cross the port and reduce
	 * their consumed bandwidth from the available.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		int dp_consumed_up, dp_consumed_down;

		if (tb_tunnel_is_invalid(tunnel))

		if (!tb_tunnel_is_dp(tunnel))

		if (!tb_tunnel_port_on_path(tunnel, port))

		/*
		 * Ignore the DP tunnel between src_port and dst_port
		 * because it is the same tunnel and we may be
		 * re-calculating estimated bandwidth.
		 */
		if (tunnel->src_port == src_port &&
		    tunnel->dst_port == dst_port)

		ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
						   &dp_consumed_down);

		*consumed_up += dp_consumed_up;
		*consumed_down += dp_consumed_down;

static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
			      struct tb_port *port)
	bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
	enum tb_link_width width;

	if (tb_is_upstream_port(port))
		width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
		width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;

	return tb_port_width_supported(port, width);
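/*
 * Put differently (informal truth table for the code above): for an
 * upstream port, a downstream-heavy tunnel needs RX asymmetry and an
 * upstream-heavy one needs TX asymmetry; for a downstream port the
 * widths are mirrored, since both sides describe the same physical
 * link from opposite ends.
 */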
/**
 * tb_maximum_bandwidth() - Maximum bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the total bandwidth is calculated for
 * @max_up: Maximum upstream bandwidth (Mb/s)
 * @max_down: Maximum downstream bandwidth (Mb/s)
 * @include_asym: Include bandwidth if the link is switched from
 *		  symmetric to asymmetric
 *
 * Returns maximum possible bandwidth in @max_up and @max_down over a
 * single link at @port. If @include_asym is set then includes the
 * additional bandwidth if the links are transitioned into asymmetric in
 * the direction from @src_port to @dst_port.
 */
static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
				struct tb_port *dst_port, struct tb_port *port,
				int *max_up, int *max_down, bool include_asym)
	bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
	int link_speed, link_width, up_bw, down_bw;

	/*
	 * Can include asymmetric, only if it is actually supported by
	 * the link.
	 */
	if (!tb_asym_supported(src_port, dst_port, port))
		include_asym = false;

	if (tb_is_upstream_port(port)) {
		link_speed = port->sw->link_speed;
		/*
		 * sw->link_width is from upstream perspective so we use
		 * the opposite for downstream of the host router.
		 */
		if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
			up_bw = link_speed * 3 * 1000;
			down_bw = link_speed * 1 * 1000;
		} else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
			up_bw = link_speed * 1 * 1000;
			down_bw = link_speed * 3 * 1000;
		} else if (include_asym) {
			/*
			 * The link is symmetric at the moment but we
			 * can switch it to asymmetric as needed. Report
			 * this bandwidth as available (even though it
			 * is not yet enabled).
			 */
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			up_bw = link_speed * port->sw->link_width * 1000;

		link_speed = tb_port_get_link_speed(port);

		link_width = tb_port_get_link_width(port);

		if (link_width == TB_LINK_WIDTH_ASYM_TX) {
			up_bw = link_speed * 1 * 1000;
			down_bw = link_speed * 3 * 1000;
		} else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
			up_bw = link_speed * 3 * 1000;
			down_bw = link_speed * 1 * 1000;
		} else if (include_asym) {
			/*
			 * The link is symmetric at the moment but we
			 * can switch it to asymmetric as needed. Report
			 * this bandwidth as available (even though it
			 * is not yet enabled).
			 */
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			up_bw = link_speed * link_width * 1000;

	/* Leave 10% guard band */
	*max_up = up_bw - up_bw / 10;
	*max_down = down_bw - down_bw / 10;

	tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
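/*
 * Example of the arithmetic above for a Gen 4 link (link_speed of 40,
 * illustrative): symmetric dual-lane gives 40 * 2 * 1000 = 80000 Mb/s
 * per direction, reported as 72000/72000 after the 10% guard band; the
 * same link as asymmetric TX gives 120000/40000 raw, i.e. 108000/36000
 * Mb/s after the guard band.
 */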
/**
 * tb_available_bandwidth() - Available bandwidth for tunneling
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @available_up: Available bandwidth upstream (Mb/s)
 * @available_down: Available bandwidth downstream (Mb/s)
 * @include_asym: Include bandwidth if the link is switched from
 *		  symmetric to asymmetric
 *
 * Calculates maximum available bandwidth for protocol tunneling between
 * @src_port and @dst_port at the moment. This is the minimum of the
 * maximum link bandwidth across all links, reduced by the bandwidth
 * currently consumed on each link.
 *
 * If @include_asym is true then this also includes the bandwidth that
 * can be added when the links are transitioned into asymmetric (but
 * does not transition the links).
 */
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down, bool include_asym)
	struct tb_port *port;

	/* Maximum possible bandwidth of an asymmetric Gen 4 link is 120 Gb/s */
	*available_up = *available_down = 120000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int max_up, max_down, consumed_up, consumed_down;

		if (!tb_port_is_null(port))

		ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
					   &max_up, &max_down, include_asym);

		ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
						      port, &consumed_up,
						      &consumed_down);
		max_up -= consumed_up;
		max_down -= consumed_down;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
					       &consumed_up, &consumed_down);
		max_up -= consumed_up;
		max_down -= consumed_down;

		if (max_up < *available_up)
			*available_up = max_up;
		if (max_down < *available_down)
			*available_down = max_down;

	if (*available_up < 0)
	if (*available_down < 0)
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);

	tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down, false);
		tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");

	tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
		      available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);

	ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
				     false);

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	tb_tunnel_free(tunnel);

	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);
static int tb_create_usb3_tunnels(struct tb_switch *sw)
	struct tb_port *port;

	if (!tb_acpi_may_tunnel_usb3())

	ret = tb_tunnel_usb3(sw->tb, sw);

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
		ret = tb_create_usb3_tunnels(port->remote->sw);
/**
 * tb_configure_asym() - Transition links to asymmetric if needed
 * @tb: Domain structure
 * @src_port: Source adapter to start the transition
 * @dst_port: Destination adapter
 * @requested_up: Additional bandwidth (Mb/s) required upstream
 * @requested_down: Additional bandwidth (Mb/s) required downstream
 *
 * Transition links between @src_port and @dst_port into asymmetric, with
 * three lanes in the direction from @src_port towards @dst_port and one lane
 * in the opposite direction, if the bandwidth requirements
 * (requested + currently consumed) on that link exceed @asym_threshold.
 *
 * Must be called with available >= requested over all links.
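 *
 * For example (illustrative numbers): with the default @asym_threshold
 * of 45000 Mb/s, a downstream link already carrying 30000 Mb/s of DP
 * traffic that receives an additional 20000 Mb/s request crosses the
 * threshold (30000 + 20000 >= 45000) and is transitioned to
 * asymmetric, provided the upstream direction stays below TB_ASYM_MIN
 * (36000 Mb/s).
 */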
static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
			     struct tb_port *dst_port, int requested_up,
			     int requested_down)
	bool clx = false, clx_disabled = false, downstream;
	struct tb_switch *sw;

	if (!asym_threshold)

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	/* Pick up router deepest in the hierarchy */

	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
		struct tb_port *down = tb_switch_downstream_port(up->sw);
		enum tb_link_width width_up, width_down;
		int consumed_up, consumed_down;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
					       &consumed_up, &consumed_down);

			/*
			 * Downstream, so make sure the upstream traffic
			 * stays within 36G (40G minus the 10% guard
			 * band) and that the request is above the
			 * threshold.
			 */
			if (consumed_up + requested_up >= TB_ASYM_MIN) {

			/* Does consumed + requested exceed the threshold */
			if (consumed_down + requested_down < asym_threshold)

			width_up = TB_LINK_WIDTH_ASYM_RX;
			width_down = TB_LINK_WIDTH_ASYM_TX;

			/* Upstream, the opposite of above */
			if (consumed_down + requested_down >= TB_ASYM_MIN) {

			if (consumed_up + requested_up < asym_threshold)

			width_up = TB_LINK_WIDTH_ASYM_TX;
			width_down = TB_LINK_WIDTH_ASYM_RX;

		if (up->sw->link_width == width_up)

		if (!tb_port_width_supported(up, width_up) ||
		    !tb_port_width_supported(down, width_down))

		/*
		 * Disable CL states before doing any transitions. We
		 * delayed it until now so that we know there is a real
		 * transition taking place.
		 */
		if (!clx_disabled) {
			clx = tb_disable_clx(sw);
			clx_disabled = true;

		tb_sw_dbg(up->sw, "configuring asymmetric link\n");

		/*
		 * Here requested + consumed > threshold so we need to
		 * transition the link into asymmetric now.
		 */
		ret = tb_switch_set_link_width(up->sw, width_up);
			tb_sw_warn(up->sw, "failed to set link width\n");

	/* Re-enable CL states if they were previously enabled */
/**
 * tb_configure_sym() - Transition links to symmetric if possible
 * @tb: Domain structure
 * @src_port: Source adapter to start the transition
 * @dst_port: Destination adapter
 * @requested_up: New lower bandwidth request upstream (Mb/s)
 * @requested_down: New lower bandwidth request downstream (Mb/s)
 * @keep_asym: Keep asymmetric link if preferred
 *
 * Goes over each link from @src_port to @dst_port and tries to
 * transition the link to symmetric if the currently consumed bandwidth
 * allows. The router's preference for an asymmetric link is honored
 * unless @keep_asym is %false.
 */
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
			    struct tb_port *dst_port, int requested_up,
			    int requested_down, bool keep_asym)
	bool clx = false, clx_disabled = false, downstream;
	struct tb_switch *sw;

	if (!asym_threshold)

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	/* Pick up router deepest in the hierarchy */

	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
		int consumed_up, consumed_down;

		/* Already symmetric */
		if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
		/* Unplugged, no need to switch */
		if (up->sw->is_unplugged)

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
					       &consumed_up, &consumed_down);

			/*
			 * Downstream, so we want consumed_down to stay
			 * below the threshold. Upstream traffic should
			 * be less than 36G (40G minus the 10% guard
			 * band) since the link was already configured
			 * asymmetric.
			 */
			if (consumed_down + requested_down >= asym_threshold)

			if (consumed_up + requested_up >= asym_threshold)

		if (up->sw->link_width == TB_LINK_WIDTH_DUAL)

		/*
		 * Here consumed < threshold so we can transition the
		 * link to symmetric.
		 *
		 * However, if the router prefers an asymmetric link we
		 * honor that (unless @keep_asym is %false).
		 */
		if (keep_asym &&
		    up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
			tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");

		/* Disable CL states before doing any transitions */
		if (!clx_disabled) {
			clx = tb_disable_clx(sw);
			clx_disabled = true;

		tb_sw_dbg(up->sw, "configuring symmetric link\n");

		ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
			tb_sw_warn(up->sw, "failed to set link width\n");

	/* Re-enable CL states if they were previously enabled */
static void tb_configure_link(struct tb_port *down, struct tb_port *up,
			      struct tb_switch *sw)
	struct tb *tb = sw->tb;

	/* Link the routers using both links if available */
	if (down->dual_link_port && up->dual_link_port) {
		down->dual_link_port->remote = up->dual_link_port;
		up->dual_link_port->remote = down->dual_link_port;

	/*
	 * Enable lane bonding if the link is currently two single lane
	 * links.
	 */
	if (sw->link_width < TB_LINK_WIDTH_DUAL)
		tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);

	/*
	 * If a device router that comes up with a symmetric link is
	 * connected deeper in the hierarchy, we transition the links
	 * above it into symmetric if bandwidth allows.
	 */
	if (tb_switch_depth(sw) > 1 &&
	    tb_port_get_link_generation(up) >= 4 &&
	    up->sw->link_width == TB_LINK_WIDTH_DUAL) {
		struct tb_port *host_port;

		host_port = tb_port_at(tb_route(sw), tb->root_switch);
		tb_configure_sym(tb, host_port, up, 0, 0, false);

	/* Set the link configured */
	tb_switch_configure_link(sw);
static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);

	if (port->config.type != TB_TYPE_PORT)
	if (port->dual_link_port && port->link_nr)
		/*
		 * Downstream switch is reachable through two ports.
		 * Only scan on the primary port (link_nr == 0).
		 */

	pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
	tb_port_dbg(port, "port already has a remote\n");

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	/*
	 * If there is an error accessing the connected switch
	 * it may be connected to another domain. Also we allow
	 * the other domain to be connected to a max depth switch.
	 */
	if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
		tb_scan_xdomain(port);

	if (tb_switch_configure(sw)) {

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment runtime PM is supported for Thunderbolt 2 and
	 * beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {

	upstream_port = tb_upstream_port(sw);
	tb_configure_link(port, upstream_port, sw);

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Configuration valid needs to be set after the TMU has been
	 * enabled for the upstream port of the router so we do it here.
	 */
	tb_switch_configuration_valid(sw);

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);

	pm_runtime_mark_last_busy(&port->usb4->dev);
	pm_runtime_put_autosuspend(&port->usb4->dev);
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
	struct tb_port *src_port, *dst_port;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
		tb_detach_bandwidth_group(src_port);
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/*
		 * If bandwidth on a link is < asym_threshold
		 * transition the link to symmetric.
		 */
		tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);

		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */

	tb_tunnel_free(tunnel);

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_set_link_width(port->remote->sw,
						 TB_LINK_WIDTH_SINGLE);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			tb_free_unplugged_children(port->remote->sw);
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))

		down = &sw->ports[index];

	if (WARN_ON(!tb_port_is_pcie_down(down)))
	if (tb_pci_port_is_enabled(down))

	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
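/*
 * Summary of the hard-coded mapping above for pre-USB4 host routers
 * (PCIe down port index by PHY port 0/1): Cactus Ridge and Alpine
 * Ridge use ports 6/7, Falcon Ridge 6/8 and Titan Ridge 8/9. USB4
 * routers are asked directly via usb4_switch_map_pcie_down() instead.
 */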
static void
tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
	struct tb_tunnel *first_tunnel;
	struct tb *tb = group->tb;

	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
	       group->index);

	first_tunnel = NULL;
	list_for_each_entry(in, &group->ports, group_list) {
		int estimated_bw, estimated_up, estimated_down;
		struct tb_tunnel *tunnel;
		struct tb_port *out;

		if (!usb4_dp_port_bandwidth_mode_enabled(in))

		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
		if (WARN_ON(!tunnel))

		if (!first_tunnel) {
			/*
			 * Since USB3 bandwidth is shared by all DP
			 * tunnels under the host router USB4 port, even
			 * if they do not begin from the host router, we
			 * can release USB3 bandwidth just once and not
			 * for each tunnel separately.
			 */
			first_tunnel = tunnel;
			ret = tb_release_unused_usb3_bandwidth(tb,
					first_tunnel->src_port, first_tunnel->dst_port);
				tb_tunnel_warn(tunnel,
					       "failed to release unused bandwidth\n");

		out = tunnel->dst_port;
		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
					     &estimated_down, true);
			tb_tunnel_warn(tunnel,
				       "failed to re-calculate estimated bandwidth\n");

		/*
		 * Estimated bandwidth includes:
		 * - already allocated bandwidth for the DP tunnel
		 * - available bandwidth along the path
		 * - bandwidth allocated for USB 3.x but not used.
		 */
		tb_tunnel_dbg(tunnel,
			      "re-calculated estimated bandwidth %u/%u Mb/s\n",
			      estimated_up, estimated_down);

		if (tb_port_path_direction_downstream(in, out))
			estimated_bw = estimated_down;
			estimated_bw = estimated_up;

		if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
			tb_tunnel_warn(tunnel,
				       "failed to update estimated bandwidth\n");

	tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
				  first_tunnel->dst_port);

	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);

static void tb_recalc_estimated_bandwidth(struct tb *tb)
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (!list_empty(&group->ports))
			tb_recalc_estimated_bandwidth_for_group(group);

	tb_dbg(tb, "bandwidth re-calculation done\n");
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "DP OUT in use\n");

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			p = tb_port_at(tb_route(port->sw), tb->root_switch);
static bool tb_tunnel_one_dp(struct tb *tb)
	int available_up, available_down, ret, link_nr;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	int consumed_up, consumed_down;
	struct tb_tunnel *tunnel;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "DP IN in use\n");

		tb_port_dbg(in, "DP IN available\n");

		out = tb_find_dp_out(tb, port);

	tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
	tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
	/*
	 * This is only applicable to links that are not bonded (so
	 * when Thunderbolt 1 hardware is involved somewhere in the
	 * topology). For these try to share the DP bandwidth between
	 * the two lanes.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel)) {

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");

	if (!tb_attach_bandwidth_group(tcm, in, out))
		goto err_dealloc_dp;

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_detach_group;

	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
				     true);
		goto err_reclaim_usb;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
				    available_down);
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim_usb;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);

	/*
	 * Transition the links to asymmetric if the consumption exceeds
	 * the threshold.
	 */
	if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
		tb_configure_asym(tb, in, out, consumed_up, consumed_down);

	/* Update the domain with the new bandwidth estimation */
	tb_recalc_estimated_bandwidth(tb);

	/*
	 * When a DP tunnel exists, change the TMU mode of the host
	 * router's first-depth children to HiFi so that CL0s works.
	 */
	tb_increase_tmu_accuracy(tunnel);

	tb_tunnel_free(tunnel);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	tb_detach_bandwidth_group(in);
	tb_switch_dealloc_dp_resource(in->sw, in);
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
static void tb_tunnel_dp(struct tb *tb)
	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");

	while (tb_tunnel_one_dp(tb))

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		tb_port_dbg(port, "DP OUT resource unavailable\n");

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_recalc_estimated_bandwidth(tb);
static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
	struct tb_cm *tcm = tb_priv(tb);

	if (tb_port_is_enabled(port))

	list_for_each_entry(p, &tcm->dp_resources, list) {

	tb_port_dbg(port, "DP %s resource available after hotplug\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */

static void tb_disconnect_and_release_dp(struct tb *tb)
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))

	tb_switch_xhci_disconnect(sw);

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_pcie_down(tb_switch_parent(sw), port);

	tunnel = tb_tunnel_alloc_pci(tb, up, down);

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);

	/*
	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
	 */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");

	if (tb_switch_xhci_connect(sw))
		tb_sw_warn(sw, "failed to connect xHCI\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);

	/*
	 * When tunneling DMA paths the link should not enter CL states
	 * so disable them now.
	 */

	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);

	tb_tunnel_free(tunnel);

	mutex_unlock(&tb->lock);

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					  int transmit_path, int transmit_ring,
					  int receive_path, int receive_ring)
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel, *n;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);

	/*
	 * Try to re-enable CL states now, it is OK if this fails
	 * because we may still have another DMA tunnel active through
	 * the same host router USB4 downstream port.
	 */

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					int transmit_path, int transmit_ring,
					int receive_path, int receive_ring)
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
		tb_warn(tb,
			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);

	pm_runtime_get_sync(&sw->dev);

	tb_retimer_remove_all(port);

	if (tb_port_has_remote(port)) {
		tb_port_dbg(port, "switch unplugged\n");
		tb_sw_set_unplugged(port->remote->sw);
		tb_free_invalid_tunnels(tb);
		tb_remove_dp_resources(port->remote->sw);
		tb_switch_tmu_disable(port->remote->sw);
		tb_switch_unconfigure_link(port->remote->sw);
		tb_switch_set_link_width(port->remote->sw,
					 TB_LINK_WIDTH_SINGLE);
		tb_switch_remove(port->remote->sw);
		port->remote = NULL;
		if (port->dual_link_port)
			port->dual_link_port->remote = NULL;
		/* Maybe we can create another DP tunnel */
		tb_recalc_estimated_bandwidth(tb);
	} else if (port->xdomain) {
		struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

		tb_port_dbg(port, "xdomain unplugged\n");
		/*
		 * Service drivers are unbound during
		 * tb_xdomain_remove() so setting XDomain as
		 * unplugged here prevents deadlock if they call
		 * tb_xdomain_disable_paths(). We will tear down
		 * all the tunnels below.
		 */
		xd->is_unplugged = true;
		tb_xdomain_remove(xd);
		port->xdomain = NULL;
		__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
		tb_port_unconfigure_xdomain(port);
	} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
		tb_dp_resource_unavailable(tb, port);
	} else if (!port->port) {
		tb_sw_dbg(sw, "xHCI disconnect request\n");
		tb_switch_xhci_disconnect(sw);
		tb_port_dbg(port,
			    "got unplug event for disconnected port, ignoring\n");
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else if (!port->port && sw->authorized) {
		tb_sw_dbg(sw, "xHCI connect request\n");
		tb_switch_xhci_connect(sw);
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
				 int *requested_down)
	int allocated_up, allocated_down, available_up, available_down, ret;
	int requested_up_corrected, requested_down_corrected, granularity;
	int max_up, max_down, max_up_rounded, max_down_rounded;
	struct tb *tb = tunnel->tb;
	struct tb_port *in, *out;

	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);

	in = tunnel->src_port;
	out = tunnel->dst_port;

	tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
		      allocated_up, allocated_down);

	/*
	 * If we get a rounded-up request from the graphics side, say
	 * HBR2 x 4 that is 17500 instead of 17280 (this is because of
	 * the granularity), we allow it too. Here the graphics has
	 * already negotiated with the DPRX the maximum possible rates
	 * (which is 17280 in this case).
	 *
	 * Since the link cannot go higher than 17280 we use that in our
	 * calculations but the DP IN adapter Allocated BW write must be
	 * the same value (17500) otherwise the adapter will mark it as
	 * failed for graphics.
	 */
	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);

	ret = usb4_dp_port_granularity(in);

	max_up_rounded = roundup(max_up, granularity);
	max_down_rounded = roundup(max_down, granularity);
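	/*
	 * For example (illustrative numbers): with a granularity of 250
	 * Mb/s, the HBR2 x 4 maximum of 17280 Mb/s discussed above
	 * rounds up to roundup(17280, 250) == 17500 Mb/s, matching the
	 * value the graphics side writes to the DP IN adapter.
	 */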
	/*
	 * This will "fix" the request down to the maximum supported
	 * rate * lanes if it is at the maximum rounded up level.
	 */
	requested_up_corrected = *requested_up;
	if (requested_up_corrected == max_up_rounded)
		requested_up_corrected = max_up;
	else if (requested_up_corrected < 0)
		requested_up_corrected = 0;
	requested_down_corrected = *requested_down;
	if (requested_down_corrected == max_down_rounded)
		requested_down_corrected = max_down;
	else if (requested_down_corrected < 0)
		requested_down_corrected = 0;

	tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
		      requested_up_corrected, requested_down_corrected);

	if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
	    (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
		tb_tunnel_dbg(tunnel,
			      "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
			      requested_up_corrected, requested_down_corrected,
			      max_up_rounded, max_down_rounded);

	if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
	    (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
		/*
		 * If bandwidth on a link is < asym_threshold transition
		 * the link to symmetric.
		 */
		tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
		/*
		 * If the requested bandwidth is less than or equal to
		 * what is currently allocated to that tunnel we simply
		 * change the reservation of the tunnel. Since all the
		 * tunnels going out from the same USB4 port are in the
		 * same group the released bandwidth will be taken into
		 * account for the other tunnels automatically below.
		 */
		return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
						 requested_down);

	/*
	 * More bandwidth is requested. Release all the potential
	 * bandwidth from USB3 first.
	 */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);

	/*
	 * Then go over all tunnels that cross the same USB4 ports (they
	 * are also in the same group but we use the same function here
	 * that we use with the normal bandwidth allocation).
	 */
	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
				     true);

	tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
		      available_up, available_down);

	if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
	    (*requested_down >= 0 && available_down >= requested_down_corrected)) {
		/*
		 * If bandwidth on a link is >= asym_threshold
		 * transition the link to asymmetric.
		 */
		ret = tb_configure_asym(tb, in, out, *requested_up,
					*requested_down);
			tb_configure_sym(tb, in, out, 0, 0, true);

	ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
					requested_down);
		tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
		tb_configure_sym(tb, in, out, 0, 0, true);

	tb_reclaim_usb3_bandwidth(tb, in, out);
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	int requested_bw, requested_up, requested_down, ret;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;

	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto unlock;

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb, "bandwidth request from non-existent router %llx\n",
			ev->route);
		goto unlock;
	}

	in = &sw->ports[ev->port];
	if (!tb_port_is_dpin(in)) {
		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
		goto put_sw;
	}

	tb_port_dbg(in, "handling bandwidth allocation request\n");

	if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
		tb_port_warn(in, "bandwidth allocation mode not enabled\n");
		goto put_sw;
	}

	ret = usb4_dp_port_requested_bandwidth(in);
	if (ret < 0) {
		if (ret == -ENODATA)
			tb_port_dbg(in, "no bandwidth request active\n");
		else
			tb_port_warn(in, "failed to read requested bandwidth\n");
		goto put_sw;
	}
	requested_bw = ret;

	tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
	if (!tunnel) {
		tb_port_warn(in, "failed to find tunnel\n");
		goto put_sw;
	}

	out = tunnel->dst_port;

	if (tb_port_path_direction_downstream(in, out)) {
		requested_up = -1;
		requested_down = requested_bw;
	} else {
		requested_up = requested_bw;
		requested_down = -1;
	}
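	/*
	 * The -1 above is a sentinel meaning "no request in this
	 * direction": only the direction the DP stream flows in carries
	 * the requested bandwidth, and tb_alloc_dp_bandwidth() treats
	 * negative values as not requested (its < 0 checks).
	 */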
	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
	if (ret) {
		if (ret == -ENOBUFS)
			tb_tunnel_warn(tunnel,
				       "not enough bandwidth available\n");
		else
			tb_tunnel_warn(tunnel,
				       "failed to change bandwidth allocation\n");
	} else {
		tb_tunnel_dbg(tunnel,
			      "bandwidth allocation changed to %d/%d Mb/s\n",
			      requested_up, requested_down);

		/* Update other clients about the allocation change */
		tb_recalc_estimated_bandwidth(tb);
	}

put_sw:
	tb_switch_put(sw);
unlock:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}
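/*
 * The tb_hotplug_event above is allocated by
 * tb_queue_dp_bandwidth_request() below; ownership passes to the work
 * item, which is why the handler frees it on every exit path.
 */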
static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
	queue_work(tb->wq, &ev->work);
}
static void tb_handle_notification(struct tb *tb, u64 route,
				   const struct cfg_error_pkg *error)
{
	switch (error->error) {
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		break;

	case TB_CFG_ERROR_DP_BW:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		tb_queue_dp_bandwidth_request(tb, route, error->port);
		break;

	default:
		/* Ignore for now */
		break;
	}
}
/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates hotplug events to tb_handle_hotplug() and error
 * notifications to tb_handle_notification().
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route = tb_cfg_get_route(&pkg->header);

	switch (type) {
	case TB_CFG_PKG_ERROR:
		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
		return;
	case TB_CFG_PKG_EVENT:
		break;
	default:
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 *
	 * However, USB4 routers support NVM firmware upgrade if they
	 * implement the necessary router operations.
	 */
	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/*
	 * To support highest CLx state, we set the host router's TMU to
	 * Normal mode.
	 */
	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb);
	/* Add DP resources from the DP tunnels created by the boot firmware */
	tb_discover_dp_resources(tb);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to re-enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_configuration_valid(sw);

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_set_link_width(port->remote->sw,
						 port->remote->sw->link_width);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port, port->xdomain);
		}
	}
}
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}
static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}
static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of an unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}
static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
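/*
 * Rough lifecycle of these callbacks (the exact call sites live in the
 * domain code, not in this file, so treat this as an orientation note):
 * ->start runs when the domain is added and ->stop when it is removed;
 * the *_noirq hooks mirror the corresponding system sleep phases; the
 * runtime_* pair backs runtime PM; and ->handle_event is the control
 * channel entry point feeding the hotplug and bandwidth machinery above.
 */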
/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static bool tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;
	bool ret = false;

	if (!x86_apple_machine)
		return false;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return false;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return false;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return false;

	/*
	 * For each hotplug capable downstream port, add a device link
	 * back to the NHI so that PCIe tunnels can be re-established
	 * after sleep.
	 */
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
			ret = true;
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}

	return ret;
}
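/*
 * A note on the device_link_add() flags used above (generic driver core
 * semantics, nothing Thunderbolt specific): the downstream port is the
 * consumer and the NHI the supplier, so the NHI is resumed before the
 * ports and suspended after them. DL_FLAG_AUTOREMOVE_SUPPLIER drops the
 * link automatically when the NHI driver unbinds, and
 * DL_FLAG_PM_RUNTIME extends the same ordering to runtime PM.
 */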
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
	tb_init_bandwidth_groups(tcm);

	tb_dbg(tb, "using software connection manager\n");

	/*
	 * Device links are needed to make sure we establish tunnels
	 * before the PCIe/USB stack is resumed so complain here if we
	 * find them missing.
	 */
	if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
		tb_warn(tb, "device links to tunneled native ports are missing!\n");

	return tb;
}
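/*
 * A minimal sketch of the expected caller (illustrative only; the real
 * caller is the NHI driver and the tb_domain_* API lives in the domain
 * code, so the exact signature may differ):
 *
 *	struct tb *tb = tb_probe(nhi);
 *
 *	if (tb)
 *		tb_domain_add(tb);
 */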