drivers/thunderbolt/tb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT              100     /* ms */
20
21 /*
22  * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
23  * direction. This is 40G minus the 10% guard band.
24  */
25 #define TB_ASYM_MIN             (40000 * 90 / 100)
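/*
 * With the values above this works out to 40000 * 90 / 100 = 36000 Mb/s,
 * the "36G" limit referred to in tb_configure_asym() and
 * tb_configure_sym() below.
 */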
26
27 /*
28  * Threshold bandwidth (in Mb/s) that is used to switch the links to
29  * asymmetric and back. This is selected as 45G which means when the
30  * request is higher than this, we switch the link to asymmetric, and
31  * when it is less than this we switch it back. The 45G is selected so
32  * that we still have 27G (of the total 72G) for bulk PCIe traffic when
33  * switching back to symmetric.
34  */
35 #define TB_ASYM_THRESHOLD       45000
36
37 #define MAX_GROUPS              7       /* max Group_ID is 7 */
38
39 static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
40 module_param_named(asym_threshold, asym_threshold, uint, 0444);
41 MODULE_PARM_DESC(asym_threshold,
42                 "threshold (Mb/s) at which Gen 4 links switch between symmetric and asymmetric; 0 disables (default: "
43                 __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
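/*
 * For example, asymmetric transitions can be disabled by booting with
 * "thunderbolt.asym_threshold=0" on the kernel command line (or by
 * passing asym_threshold=0 when loading the module). The 0444
 * permissions above make the parameter read-only at runtime via
 * /sys/module/thunderbolt/parameters/asym_threshold.
 */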
44
45 /**
46  * struct tb_cm - Simple Thunderbolt connection manager
47  * @tunnel_list: List of active tunnels
48  * @dp_resources: List of available DP resources for DP tunneling
49  * @hotplug_active: tb_handle_hotplug will stop processing plug
50  *                  events and exit if this is not set (it needs to
51  *                  acquire the lock one more time). Used to drain wq
52  *                  after cfg has been paused.
53  * @remove_work: Work used to remove any unplugged routers after
54  *               runtime resume
55  * @groups: Bandwidth groups used in this domain.
56  */
57 struct tb_cm {
58         struct list_head tunnel_list;
59         struct list_head dp_resources;
60         bool hotplug_active;
61         struct delayed_work remove_work;
62         struct tb_bandwidth_group groups[MAX_GROUPS];
63 };
64
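/*
 * The connection manager private data is allocated immediately after
 * struct tb in the same allocation (see tb_domain_alloc()), so the
 * domain pointer can be recovered by stepping back over struct tb.
 * This is the counterpart of tb_priv().
 */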
65 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
66 {
67         return ((void *)tcm - sizeof(struct tb));
68 }
69
70 struct tb_hotplug_event {
71         struct work_struct work;
72         struct tb *tb;
73         u64 route;
74         u8 port;
75         bool unplug;
76 };
77
78 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
79 {
80         int i;
81
82         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
83                 struct tb_bandwidth_group *group = &tcm->groups[i];
84
85                 group->tb = tcm_to_tb(tcm);
86                 group->index = i + 1;
87                 INIT_LIST_HEAD(&group->ports);
88         }
89 }
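/*
 * Note that the group indices are 1-based (1..MAX_GROUPS) so that they
 * can be compared directly against the Group_ID read back from a DP IN
 * adapter with usb4_dp_port_group_id(); see
 * tb_discover_bandwidth_group() below.
 */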
90
91 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
92                                            struct tb_port *in)
93 {
94         if (!group || WARN_ON(in->group))
95                 return;
96
97         in->group = group;
98         list_add_tail(&in->group_list, &group->ports);
99
100         tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
101 }
102
103 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
104 {
105         int i;
106
107         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
108                 struct tb_bandwidth_group *group = &tcm->groups[i];
109
110                 if (list_empty(&group->ports))
111                         return group;
112         }
113
114         return NULL;
115 }
116
117 static struct tb_bandwidth_group *
118 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
119                           struct tb_port *out)
120 {
121         struct tb_bandwidth_group *group;
122         struct tb_tunnel *tunnel;
123
124         /*
125          * Find all DP tunnels that go through all the same USB4 links
126          * as this one. Because we always set up tunnels the same way we
127          * can just check the routers at both ends of the tunnels and if
128          * they are the same we have a match.
129          */
130         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
131                 if (!tb_tunnel_is_dp(tunnel))
132                         continue;
133
134                 if (tunnel->src_port->sw == in->sw &&
135                     tunnel->dst_port->sw == out->sw) {
136                         group = tunnel->src_port->group;
137                         if (group) {
138                                 tb_bandwidth_group_attach_port(group, in);
139                                 return group;
140                         }
141                 }
142         }
143
144         /* Pick up next available group then */
145         group = tb_find_free_bandwidth_group(tcm);
146         if (group)
147                 tb_bandwidth_group_attach_port(group, in);
148         else
149                 tb_port_warn(in, "no available bandwidth groups\n");
150
151         return group;
152 }
153
154 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
155                                         struct tb_port *out)
156 {
157         if (usb4_dp_port_bandwidth_mode_enabled(in)) {
158                 int index, i;
159
160                 index = usb4_dp_port_group_id(in);
161                 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
162                         if (tcm->groups[i].index == index) {
163                                 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
164                                 return;
165                         }
166                 }
167         }
168
169         tb_attach_bandwidth_group(tcm, in, out);
170 }
171
172 static void tb_detach_bandwidth_group(struct tb_port *in)
173 {
174         struct tb_bandwidth_group *group = in->group;
175
176         if (group) {
177                 in->group = NULL;
178                 list_del_init(&in->group_list);
179
180                 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
181         }
182 }
183
184 static void tb_handle_hotplug(struct work_struct *work);
185
186 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
187 {
188         struct tb_hotplug_event *ev;
189
190         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
191         if (!ev)
192                 return;
193
194         ev->tb = tb;
195         ev->route = route;
196         ev->port = port;
197         ev->unplug = unplug;
198         INIT_WORK(&ev->work, tb_handle_hotplug);
199         queue_work(tb->wq, &ev->work);
200 }
201
202 /* enumeration & hot plug handling */
203
204 static void tb_add_dp_resources(struct tb_switch *sw)
205 {
206         struct tb_cm *tcm = tb_priv(sw->tb);
207         struct tb_port *port;
208
209         tb_switch_for_each_port(sw, port) {
210                 if (!tb_port_is_dpin(port))
211                         continue;
212
213                 if (!tb_switch_query_dp_resource(sw, port))
214                         continue;
215
216                 /*
217                  * If a DP IN on a device router exists, position it at the
218                  * beginning of the DP resources list, so that it is used
219                  * before the DP IN of the host router. This way external
220                  * GPU(s) are prioritized when pairing a DP IN to a DP OUT.
221                  */
222                 if (tb_route(sw))
223                         list_add(&port->list, &tcm->dp_resources);
224                 else
225                         list_add_tail(&port->list, &tcm->dp_resources);
226
227                 tb_port_dbg(port, "DP IN resource available\n");
228         }
229 }
230
231 static void tb_remove_dp_resources(struct tb_switch *sw)
232 {
233         struct tb_cm *tcm = tb_priv(sw->tb);
234         struct tb_port *port, *tmp;
235
236         /* Clear children resources first */
237         tb_switch_for_each_port(sw, port) {
238                 if (tb_port_has_remote(port))
239                         tb_remove_dp_resources(port->remote->sw);
240         }
241
242         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
243                 if (port->sw == sw) {
244                         tb_port_dbg(port, "DP OUT resource unavailable\n");
245                         list_del_init(&port->list);
246                 }
247         }
248 }
249
250 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
251 {
252         struct tb_cm *tcm = tb_priv(tb);
253         struct tb_port *p;
254
255         list_for_each_entry(p, &tcm->dp_resources, list) {
256                 if (p == port)
257                         return;
258         }
259
260         tb_port_dbg(port, "DP %s resource available discovered\n",
261                     tb_port_is_dpin(port) ? "IN" : "OUT");
262         list_add_tail(&port->list, &tcm->dp_resources);
263 }
264
265 static void tb_discover_dp_resources(struct tb *tb)
266 {
267         struct tb_cm *tcm = tb_priv(tb);
268         struct tb_tunnel *tunnel;
269
270         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
271                 if (tb_tunnel_is_dp(tunnel))
272                         tb_discover_dp_resource(tb, tunnel->dst_port);
273         }
274 }
275
276 /* Enables CL states up to host router */
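/*
 * CL0s, CL1 and CL2 are progressively deeper USB4 link low power
 * states; deeper states generally save more power at the cost of
 * longer exit latency, which is why CL2 is attempted first and
 * CL0s/CL1 used as the fallback below.
 */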
277 static int tb_enable_clx(struct tb_switch *sw)
278 {
279         struct tb_cm *tcm = tb_priv(sw->tb);
280         unsigned int clx = TB_CL0S | TB_CL1;
281         const struct tb_tunnel *tunnel;
282         int ret;
283
284         /*
285          * Currently only enable CLx for the first link. This is enough
286          * to allow the CPU to save energy at least on Intel hardware
287          * and makes it slightly simpler to implement. We may change
288          * this in the future to cover the whole topology if it turns
289          * out to be beneficial.
290          */
291         while (sw && tb_switch_depth(sw) > 1)
292                 sw = tb_switch_parent(sw);
293
294         if (!sw)
295                 return 0;
296
297         if (tb_switch_depth(sw) != 1)
298                 return 0;
299
300         /*
301          * If we are re-enabling then check if there is an active DMA
302          * tunnel and in that case bail out.
303          */
304         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
305                 if (tb_tunnel_is_dma(tunnel)) {
306                         if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
307                                 return 0;
308                 }
309         }
310
311         /*
312          * Initially try with CL2. If that's not supported by the
313          * topology try with CL0s and CL1 and then give up.
314          */
315         ret = tb_switch_clx_enable(sw, clx | TB_CL2);
316         if (ret == -EOPNOTSUPP)
317                 ret = tb_switch_clx_enable(sw, clx);
318         return ret == -EOPNOTSUPP ? 0 : ret;
319 }
320
321 /**
322  * tb_disable_clx() - Disable CL states up to host router
323  * @sw: Router to start
324  *
325  * Disables CL states from @sw up to the host router. Returns true if
326  * any CL states were disabled. This can be used to figure out whether
327  * the link was set up by us or by the boot firmware so we don't
328  * accidentally enable them if they were not enabled during discovery.
329  */
330 static bool tb_disable_clx(struct tb_switch *sw)
331 {
332         bool disabled = false;
333
334         do {
335                 int ret;
336
337                 ret = tb_switch_clx_disable(sw);
338                 if (ret > 0)
339                         disabled = true;
340                 else if (ret < 0)
341                         tb_sw_warn(sw, "failed to disable CL states\n");
342
343                 sw = tb_switch_parent(sw);
344         } while (sw);
345
346         return disabled;
347 }
348
349 static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
350 {
351         struct tb_switch *sw;
352
353         sw = tb_to_switch(dev);
354         if (!sw)
355                 return 0;
356
357         if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
358                 enum tb_switch_tmu_mode mode;
359                 int ret;
360
361                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
362                         mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
363                 else
364                         mode = TB_SWITCH_TMU_MODE_HIFI_BI;
365
366                 ret = tb_switch_tmu_configure(sw, mode);
367                 if (ret)
368                         return ret;
369
370                 return tb_switch_tmu_enable(sw);
371         }
372
373         return 0;
374 }
375
376 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
377 {
378         struct tb_switch *sw;
379
380         if (!tunnel)
381                 return;
382
383         /*
384          * Once the first DP tunnel is established we change the TMU
385          * accuracy of the first-depth child routers (and the host router)
386          * to the highest. This is needed for the DP tunneling to work
387          * but also allows CL0s.
388          *
389          * If both routers are v2 then we don't need to do anything as
390          * they are using enhanced TMU mode that allows all CLx.
391          */
392         sw = tunnel->tb->root_switch;
393         device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
394 }
395
396 static int tb_enable_tmu(struct tb_switch *sw)
397 {
398         int ret;
399
400         /*
401          * If both routers at the ends of the link are v2 we simply
402          * enable the enhanced uni-directional mode. That covers all
403          * the CL states. For v1 and before we need to use the normal
404          * rate to allow CL1 (when supported). Otherwise we keep the TMU
405          * running at the highest accuracy.
406          */
407         ret = tb_switch_tmu_configure(sw,
408                         TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
409         if (ret == -EOPNOTSUPP) {
410                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
411                         ret = tb_switch_tmu_configure(sw,
412                                         TB_SWITCH_TMU_MODE_LOWRES);
413                 else
414                         ret = tb_switch_tmu_configure(sw,
415                                         TB_SWITCH_TMU_MODE_HIFI_BI);
416         }
417         if (ret)
418                 return ret;
419
420         /* If it is already enabled in correct mode, don't touch it */
421         if (tb_switch_tmu_is_enabled(sw))
422                 return 0;
423
424         ret = tb_switch_tmu_disable(sw);
425         if (ret)
426                 return ret;
427
428         ret = tb_switch_tmu_post_time(sw);
429         if (ret)
430                 return ret;
431
432         return tb_switch_tmu_enable(sw);
433 }
434
435 static void tb_switch_discover_tunnels(struct tb_switch *sw,
436                                        struct list_head *list,
437                                        bool alloc_hopids)
438 {
439         struct tb *tb = sw->tb;
440         struct tb_port *port;
441
442         tb_switch_for_each_port(sw, port) {
443                 struct tb_tunnel *tunnel = NULL;
444
445                 switch (port->config.type) {
446                 case TB_TYPE_DP_HDMI_IN:
447                         tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
448                         tb_increase_tmu_accuracy(tunnel);
449                         break;
450
451                 case TB_TYPE_PCIE_DOWN:
452                         tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
453                         break;
454
455                 case TB_TYPE_USB3_DOWN:
456                         tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
457                         break;
458
459                 default:
460                         break;
461                 }
462
463                 if (tunnel)
464                         list_add_tail(&tunnel->list, list);
465         }
466
467         tb_switch_for_each_port(sw, port) {
468                 if (tb_port_has_remote(port)) {
469                         tb_switch_discover_tunnels(port->remote->sw, list,
470                                                    alloc_hopids);
471                 }
472         }
473 }
474
475 static void tb_discover_tunnels(struct tb *tb)
476 {
477         struct tb_cm *tcm = tb_priv(tb);
478         struct tb_tunnel *tunnel;
479
480         tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
481
482         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
483                 if (tb_tunnel_is_pci(tunnel)) {
484                         struct tb_switch *parent = tunnel->dst_port->sw;
485
486                         while (parent != tunnel->src_port->sw) {
487                                 parent->boot = true;
488                                 parent = tb_switch_parent(parent);
489                         }
490                 } else if (tb_tunnel_is_dp(tunnel)) {
491                         struct tb_port *in = tunnel->src_port;
492                         struct tb_port *out = tunnel->dst_port;
493
494                         /* Keep the domain from powering down */
495                         pm_runtime_get_sync(&in->sw->dev);
496                         pm_runtime_get_sync(&out->sw->dev);
497
498                         tb_discover_bandwidth_group(tcm, in, out);
499                 }
500         }
501 }
502
503 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
504 {
505         if (tb_switch_is_usb4(port->sw))
506                 return usb4_port_configure_xdomain(port, xd);
507         return tb_lc_configure_xdomain(port);
508 }
509
510 static void tb_port_unconfigure_xdomain(struct tb_port *port)
511 {
512         if (tb_switch_is_usb4(port->sw))
513                 usb4_port_unconfigure_xdomain(port);
514         else
515                 tb_lc_unconfigure_xdomain(port);
516 }
517
518 static void tb_scan_xdomain(struct tb_port *port)
519 {
520         struct tb_switch *sw = port->sw;
521         struct tb *tb = sw->tb;
522         struct tb_xdomain *xd;
523         u64 route;
524
525         if (!tb_is_xdomain_enabled())
526                 return;
527
528         route = tb_downstream_route(port);
529         xd = tb_xdomain_find_by_route(tb, route);
530         if (xd) {
531                 tb_xdomain_put(xd);
532                 return;
533         }
534
535         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
536                               NULL);
537         if (xd) {
538                 tb_port_at(route, sw)->xdomain = xd;
539                 tb_port_configure_xdomain(port, xd);
540                 tb_xdomain_add(xd);
541         }
542 }
543
544 /**
545  * tb_find_unused_port() - return the first inactive port on @sw
546  * @sw: Switch to find the port on
547  * @type: Port type to look for
548  */
549 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
550                                            enum tb_port_type type)
551 {
552         struct tb_port *port;
553
554         tb_switch_for_each_port(sw, port) {
555                 if (tb_is_upstream_port(port))
556                         continue;
557                 if (port->config.type != type)
558                         continue;
559                 if (!port->cap_adap)
560                         continue;
561                 if (tb_port_is_enabled(port))
562                         continue;
563                 return port;
564         }
565         return NULL;
566 }
567
568 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
569                                          const struct tb_port *port)
570 {
571         struct tb_port *down;
572
573         down = usb4_switch_map_usb3_down(sw, port);
574         if (down && !tb_usb3_port_is_enabled(down))
575                 return down;
576         return NULL;
577 }
578
579 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
580                                         struct tb_port *src_port,
581                                         struct tb_port *dst_port)
582 {
583         struct tb_cm *tcm = tb_priv(tb);
584         struct tb_tunnel *tunnel;
585
586         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
587                 if (tunnel->type == type &&
588                     ((src_port && src_port == tunnel->src_port) ||
589                      (dst_port && dst_port == tunnel->dst_port))) {
590                         return tunnel;
591                 }
592         }
593
594         return NULL;
595 }
596
597 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
598                                                    struct tb_port *src_port,
599                                                    struct tb_port *dst_port)
600 {
601         struct tb_port *port, *usb3_down;
602         struct tb_switch *sw;
603
604         /* Pick the router that is deepest in the topology */
605         if (tb_port_path_direction_downstream(src_port, dst_port))
606                 sw = dst_port->sw;
607         else
608                 sw = src_port->sw;
609
610         /* Can't be the host router */
611         if (sw == tb->root_switch)
612                 return NULL;
613
614         /* Find the downstream USB4 port that leads to this router */
615         port = tb_port_at(tb_route(sw), tb->root_switch);
616         /* Find the corresponding host router USB3 downstream port */
617         usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
618         if (!usb3_down)
619                 return NULL;
620
621         return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
622 }
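/*
 * The first hop USB3 tunnel (starting from the host router USB3
 * downstream adapter) caps the USB3 bandwidth for the whole branch,
 * which is why tb_release_unused_usb3_bandwidth() and
 * tb_reclaim_usb3_bandwidth() below operate on the tunnel returned
 * here.
 */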
623
624 /**
625  * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
626  * @tb: Domain structure
627  * @src_port: Source protocol adapter
628  * @dst_port: Destination protocol adapter
629  * @port: USB4 port the consumed bandwidth is calculated for
630  * @consumed_up: Consumed upstream bandwidth (Mb/s)
631  * @consumed_down: Consumed downstream bandwidth (Mb/s)
632  *
633  * Calculates the consumed USB3 and PCIe bandwidth at @port on the path
634  * from @src_port to @dst_port. Does not take into account the tunnel
635  * starting from @src_port and ending at @dst_port.
636  */
637 static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
638                                            struct tb_port *src_port,
639                                            struct tb_port *dst_port,
640                                            struct tb_port *port,
641                                            int *consumed_up,
642                                            int *consumed_down)
643 {
644         int pci_consumed_up, pci_consumed_down;
645         struct tb_tunnel *tunnel;
646
647         *consumed_up = *consumed_down = 0;
648
649         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
650         if (tunnel && tunnel->src_port != src_port &&
651             tunnel->dst_port != dst_port) {
652                 int ret;
653
654                 ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
655                                                    consumed_down);
656                 if (ret)
657                         return ret;
658         }
659
660         /*
661          * If there is anything reserved for PCIe bulk traffic take it
662          * into account here too.
663          */
664         if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
665                 *consumed_up += pci_consumed_up;
666                 *consumed_down += pci_consumed_down;
667         }
668
669         return 0;
670 }
671
672 /**
673  * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
674  * @tb: Domain structure
675  * @src_port: Source protocol adapter
676  * @dst_port: Destination protocol adapter
677  * @port: USB4 port the consumed bandwidth is calculated for
678  * @consumed_up: Consumed upstream bandwidth (Mb/s)
679  * @consumed_down: Consumed downstream bandwidth (Mb/s)
680  *
681  * Calculates the consumed DP bandwidth at @port on the path from
682  * @src_port to @dst_port. Does not take into account the tunnel
683  * starting from @src_port and ending at @dst_port.
684  */
685 static int tb_consumed_dp_bandwidth(struct tb *tb,
686                                     struct tb_port *src_port,
687                                     struct tb_port *dst_port,
688                                     struct tb_port *port,
689                                     int *consumed_up,
690                                     int *consumed_down)
691 {
692         struct tb_cm *tcm = tb_priv(tb);
693         struct tb_tunnel *tunnel;
694         int ret;
695
696         *consumed_up = *consumed_down = 0;
697
698         /*
699          * Find all DP tunnels that cross the port and reduce
700          * their consumed bandwidth from the available.
701          */
702         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
703                 int dp_consumed_up, dp_consumed_down;
704
705                 if (tb_tunnel_is_invalid(tunnel))
706                         continue;
707
708                 if (!tb_tunnel_is_dp(tunnel))
709                         continue;
710
711                 if (!tb_tunnel_port_on_path(tunnel, port))
712                         continue;
713
714                 /*
715                  * Ignore the DP tunnel between src_port and dst_port
716                  * because it is the same tunnel and we may be
717                  * re-calculating estimated bandwidth.
718                  */
719                 if (tunnel->src_port == src_port &&
720                     tunnel->dst_port == dst_port)
721                         continue;
722
723                 ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
724                                                    &dp_consumed_down);
725                 if (ret)
726                         return ret;
727
728                 *consumed_up += dp_consumed_up;
729                 *consumed_down += dp_consumed_down;
730         }
731
732         return 0;
733 }
734
735 static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
736                               struct tb_port *port)
737 {
738         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
739         enum tb_link_width width;
740
741         if (tb_is_upstream_port(port))
742                 width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
743         else
744                 width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
745
746         return tb_port_width_supported(port, width);
747 }
748
749 /**
750  * tb_maximum_bandwidth() - Maximum bandwidth over a single link
751  * @tb: Domain structure
752  * @src_port: Source protocol adapter
753  * @dst_port: Destination protocol adapter
754  * @port: USB4 port the total bandwidth is calculated for
755  * @max_up: Maximum upstream bandwidth (Mb/s)
756  * @max_down: Maximum downstream bandwidth (Mb/s)
757  * @include_asym: Include bandwidth if the link is switched from
758  *                symmetric to asymmetric
759  *
760  * Returns maximum possible bandwidth in @max_up and @max_down over a
761  * single link at @port. If @include_asym is set then this also includes
762  * the additional bandwidth available if the links are transitioned into
763  * asymmetric in the direction from @src_port to @dst_port.
764  */
765 static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
766                                 struct tb_port *dst_port, struct tb_port *port,
767                                 int *max_up, int *max_down, bool include_asym)
768 {
769         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
770         int link_speed, link_width, up_bw, down_bw;
771
772         /*
773          * Asymmetric bandwidth can be included only if it is actually
774          * supported by the lane adapter.
775          */
776         if (!tb_asym_supported(src_port, dst_port, port))
777                 include_asym = false;
778
779         if (tb_is_upstream_port(port)) {
780                 link_speed = port->sw->link_speed;
781                 /*
782                  * sw->link_width is from upstream perspective so we use
783                  * the opposite for downstream of the host router.
784                  */
785                 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
786                         up_bw = link_speed * 3 * 1000;
787                         down_bw = link_speed * 1 * 1000;
788                 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
789                         up_bw = link_speed * 1 * 1000;
790                         down_bw = link_speed * 3 * 1000;
791                 } else if (include_asym) {
792                         /*
793                          * The link is symmetric at the moment but we
794                          * can switch it to asymmetric as needed. Report
795                          * this bandwidth as available (even though it
796                          * is not yet enabled).
797                          */
798                         if (downstream) {
799                                 up_bw = link_speed * 1 * 1000;
800                                 down_bw = link_speed * 3 * 1000;
801                         } else {
802                                 up_bw = link_speed * 3 * 1000;
803                                 down_bw = link_speed * 1 * 1000;
804                         }
805                 } else {
806                         up_bw = link_speed * port->sw->link_width * 1000;
807                         down_bw = up_bw;
808                 }
809         } else {
810                 link_speed = tb_port_get_link_speed(port);
811                 if (link_speed < 0)
812                         return link_speed;
813
814                 link_width = tb_port_get_link_width(port);
815                 if (link_width < 0)
816                         return link_width;
817
818                 if (link_width == TB_LINK_WIDTH_ASYM_TX) {
819                         up_bw = link_speed * 1 * 1000;
820                         down_bw = link_speed * 3 * 1000;
821                 } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
822                         up_bw = link_speed * 3 * 1000;
823                         down_bw = link_speed * 1 * 1000;
824                 } else if (include_asym) {
825                         /*
826                          * The link is symmetric at the moment but we
827                          * can switch it to asymmetric as needed. Report
828                          * this bandwidth as available (even though it
829                          * is not yet enabled).
830                          */
831                         if (downstream) {
832                                 up_bw = link_speed * 1 * 1000;
833                                 down_bw = link_speed * 3 * 1000;
834                         } else {
835                                 up_bw = link_speed * 3 * 1000;
836                                 down_bw = link_speed * 1 * 1000;
837                         }
838                 } else {
839                         up_bw = link_speed * link_width * 1000;
840                         down_bw = up_bw;
841                 }
842         }
843
844         /* Leave 10% guard band */
845         *max_up = up_bw - up_bw / 10;
846         *max_down = down_bw - down_bw / 10;
847
848         tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
849         return 0;
850 }
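/*
 * Worked example for a Gen 4 link (40G per lane): a symmetric link
 * yields 2 * 40000 = 80000 Mb/s per direction, i.e. 72000/72000 Mb/s
 * after the 10% guard band. The same link reported as (or convertible
 * to) asymmetric yields 3 * 40000 = 120000 Mb/s one way and
 * 1 * 40000 = 40000 Mb/s the other, i.e. 108000/36000 Mb/s.
 */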
851
852 /**
853  * tb_available_bandwidth() - Available bandwidth for tunneling
854  * @tb: Domain structure
855  * @src_port: Source protocol adapter
856  * @dst_port: Destination protocol adapter
857  * @available_up: Available bandwidth upstream (Mb/s)
858  * @available_down: Available bandwidth downstream (Mb/s)
859  * @include_asym: Include bandwidth if the link is switched from
860  *                symmetric to asymmetric
861  *
862  * Calculates maximum available bandwidth for protocol tunneling between
863  * @src_port and @dst_port at the moment. This is the minimum, over all
864  * links on the path, of the maximum link bandwidth reduced by the
865  * bandwidth currently consumed on that link.
866  *
867  * If @include_asym is true then this also includes bandwidth that can be
868  * added when the links are transitioned into asymmetric (but does not
869  * transition the links).
870  */
871 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
872                                  struct tb_port *dst_port, int *available_up,
873                                  int *available_down, bool include_asym)
874 {
875         struct tb_port *port;
876         int ret;
877
878         /* Maximum possible bandwidth of an asymmetric Gen 4 link is 120 Gb/s */
879         *available_up = *available_down = 120000;
880
881         /* Find the minimum available bandwidth over all links */
882         tb_for_each_port_on_path(src_port, dst_port, port) {
883                 int max_up, max_down, consumed_up, consumed_down;
884
885                 if (!tb_port_is_null(port))
886                         continue;
887
888                 ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
889                                            &max_up, &max_down, include_asym);
890                 if (ret)
891                         return ret;
892
893                 ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
894                                                       port, &consumed_up,
895                                                       &consumed_down);
896                 if (ret)
897                         return ret;
898                 max_up -= consumed_up;
899                 max_down -= consumed_down;
900
901                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
902                                                &consumed_up, &consumed_down);
903                 if (ret)
904                         return ret;
905                 max_up -= consumed_up;
906                 max_down -= consumed_down;
907
908                 if (max_up < *available_up)
909                         *available_up = max_up;
910                 if (max_down < *available_down)
911                         *available_down = max_down;
912         }
913
914         if (*available_up < 0)
915                 *available_up = 0;
916         if (*available_down < 0)
917                 *available_down = 0;
918
919         return 0;
920 }
921
922 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
923                                             struct tb_port *src_port,
924                                             struct tb_port *dst_port)
925 {
926         struct tb_tunnel *tunnel;
927
928         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
929         return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
930 }
931
932 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
933                                       struct tb_port *dst_port)
934 {
935         int ret, available_up, available_down;
936         struct tb_tunnel *tunnel;
937
938         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
939         if (!tunnel)
940                 return;
941
942         tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");
943
944         /*
945          * Calculate available bandwidth for the first hop USB3 tunnel.
946          * That determines the whole USB3 bandwidth for this branch.
947          */
948         ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
949                                      &available_up, &available_down, false);
950         if (ret) {
951                 tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
952                 return;
953         }
954
955         tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
956                       available_down);
957
958         tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
959 }
960
961 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
962 {
963         struct tb_switch *parent = tb_switch_parent(sw);
964         int ret, available_up, available_down;
965         struct tb_port *up, *down, *port;
966         struct tb_cm *tcm = tb_priv(tb);
967         struct tb_tunnel *tunnel;
968
969         if (!tb_acpi_may_tunnel_usb3()) {
970                 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
971                 return 0;
972         }
973
974         up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
975         if (!up)
976                 return 0;
977
978         if (!sw->link_usb4)
979                 return 0;
980
981         /*
982          * Look up an available down port. Since we are chaining, it
983          * should be found right above this switch.
984          */
985         port = tb_switch_downstream_port(sw);
986         down = tb_find_usb3_down(parent, port);
987         if (!down)
988                 return 0;
989
990         if (tb_route(parent)) {
991                 struct tb_port *parent_up;
992                 /*
993                  * Check first that the parent switch has its upstream USB3
994                  * port enabled. Otherwise the chain is not complete and
995                  * there is no point setting up a new tunnel.
996                  */
997                 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
998                 if (!parent_up || !tb_port_is_enabled(parent_up))
999                         return 0;
1000
1001                 /* Make all unused bandwidth available for the new tunnel */
1002                 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
1003                 if (ret)
1004                         return ret;
1005         }
1006
1007         ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
1008                                      false);
1009         if (ret)
1010                 goto err_reclaim;
1011
1012         tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
1013                     available_up, available_down);
1014
1015         tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
1016                                       available_down);
1017         if (!tunnel) {
1018                 ret = -ENOMEM;
1019                 goto err_reclaim;
1020         }
1021
1022         if (tb_tunnel_activate(tunnel)) {
1023                 tb_port_info(up,
1024                              "USB3 tunnel activation failed, aborting\n");
1025                 ret = -EIO;
1026                 goto err_free;
1027         }
1028
1029         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1030         if (tb_route(parent))
1031                 tb_reclaim_usb3_bandwidth(tb, down, up);
1032
1033         return 0;
1034
1035 err_free:
1036         tb_tunnel_free(tunnel);
1037 err_reclaim:
1038         if (tb_route(parent))
1039                 tb_reclaim_usb3_bandwidth(tb, down, up);
1040
1041         return ret;
1042 }
1043
1044 static int tb_create_usb3_tunnels(struct tb_switch *sw)
1045 {
1046         struct tb_port *port;
1047         int ret;
1048
1049         if (!tb_acpi_may_tunnel_usb3())
1050                 return 0;
1051
1052         if (tb_route(sw)) {
1053                 ret = tb_tunnel_usb3(sw->tb, sw);
1054                 if (ret)
1055                         return ret;
1056         }
1057
1058         tb_switch_for_each_port(sw, port) {
1059                 if (!tb_port_has_remote(port))
1060                         continue;
1061                 ret = tb_create_usb3_tunnels(port->remote->sw);
1062                 if (ret)
1063                         return ret;
1064         }
1065
1066         return 0;
1067 }
1068
1069 /**
1070  * tb_configure_asym() - Transition links to asymmetric if needed
1071  * @tb: Domain structure
1072  * @src_port: Source adapter to start the transition
1073  * @dst_port: Destination adapter
1074  * @requested_up: Additional bandwidth (Mb/s) required upstream
1075  * @requested_down: Additional bandwidth (Mb/s) required downstream
1076  *
1077  * Transition links between @src_port and @dst_port into asymmetric, with
1078  * three lanes in the direction from @src_port towards @dst_port and one lane
1079  * in the opposite direction, if the bandwidth requirements
1080  * (requested + currently consumed) on that link exceed @asym_threshold.
1081  *
1082  * Must be called with available >= requested over all links.
1083  */
1084 static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
1085                              struct tb_port *dst_port, int requested_up,
1086                              int requested_down)
1087 {
1088         bool clx = false, clx_disabled = false, downstream;
1089         struct tb_switch *sw;
1090         struct tb_port *up;
1091         int ret = 0;
1092
1093         if (!asym_threshold)
1094                 return 0;
1095
1096         downstream = tb_port_path_direction_downstream(src_port, dst_port);
1097         /* Pick the router deepest in the hierarchy */
1098         if (downstream)
1099                 sw = dst_port->sw;
1100         else
1101                 sw = src_port->sw;
1102
1103         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1104                 struct tb_port *down = tb_switch_downstream_port(up->sw);
1105                 enum tb_link_width width_up, width_down;
1106                 int consumed_up, consumed_down;
1107
1108                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1109                                                &consumed_up, &consumed_down);
1110                 if (ret)
1111                         break;
1112
1113                 if (downstream) {
1114                         /*
1115                          * Downstream, so make sure the upstream direction stays
1116                          * within 36G (40G minus the 10% guard band), and that
1117                          * the requested bandwidth is above the threshold.
1118                          */
1119                         if (consumed_up + requested_up >= TB_ASYM_MIN) {
1120                                 ret = -ENOBUFS;
1121                                 break;
1122                         }
1123                         /* Does consumed + requested exceed the threshold */
1124                         if (consumed_down + requested_down < asym_threshold)
1125                                 continue;
1126
1127                         width_up = TB_LINK_WIDTH_ASYM_RX;
1128                         width_down = TB_LINK_WIDTH_ASYM_TX;
1129                 } else {
1130                         /* Upstream, the opposite of above */
1131                         if (consumed_down + requested_down >= TB_ASYM_MIN) {
1132                                 ret = -ENOBUFS;
1133                                 break;
1134                         }
1135                         if (consumed_up + requested_up < asym_threshold)
1136                                 continue;
1137
1138                         width_up = TB_LINK_WIDTH_ASYM_TX;
1139                         width_down = TB_LINK_WIDTH_ASYM_RX;
1140                 }
1141
1142                 if (up->sw->link_width == width_up)
1143                         continue;
1144
1145                 if (!tb_port_width_supported(up, width_up) ||
1146                     !tb_port_width_supported(down, width_down))
1147                         continue;
1148
1149                 /*
1150                  * Disable CL states before doing any transitions. We
1151                  * delayed it until now, when we know there is a real
1152                  * transition taking place.
1153                  */
1154                 if (!clx_disabled) {
1155                         clx = tb_disable_clx(sw);
1156                         clx_disabled = true;
1157                 }
1158
1159                 tb_sw_dbg(up->sw, "configuring asymmetric link\n");
1160
1161                 /*
1162                  * Here requested + consumed > threshold so we need to
1163                  * transition the link into asymmetric now.
1164                  */
1165                 ret = tb_switch_set_link_width(up->sw, width_up);
1166                 if (ret) {
1167                         tb_sw_warn(up->sw, "failed to set link width\n");
1168                         break;
1169                 }
1170         }
1171
1172         /* Re-enable CL states if they were previously enabled */
1173         if (clx)
1174                 tb_enable_clx(sw);
1175
1176         return ret;
1177 }
1178
1179 /**
1180  * tb_configure_sym() - Transition links to symmetric if possible
1181  * @tb: Domain structure
1182  * @src_port: Source adapter to start the transition
1183  * @dst_port: Destination adapter
1184  * @requested_up: New lower bandwidth request upstream (Mb/s)
1185  * @requested_down: New lower bandwidth request downstream (Mb/s)
1186  * @keep_asym: Keep asymmetric link if preferred
1187  *
1188  * Goes over each link from @src_port to @dst_port and tries to
1189  * transition the link to symmetric if the currently consumed bandwidth
1190  * allows; the router's asymmetric link preference is ignored if @keep_asym is %false.
1191  */
1192 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
1193                             struct tb_port *dst_port, int requested_up,
1194                             int requested_down, bool keep_asym)
1195 {
1196         bool clx = false, clx_disabled = false, downstream;
1197         struct tb_switch *sw;
1198         struct tb_port *up;
1199         int ret = 0;
1200
1201         if (!asym_threshold)
1202                 return 0;
1203
1204         downstream = tb_port_path_direction_downstream(src_port, dst_port);
1205         /* Pick the router deepest in the hierarchy */
1206         if (downstream)
1207                 sw = dst_port->sw;
1208         else
1209                 sw = src_port->sw;
1210
1211         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1212                 int consumed_up, consumed_down;
1213
1214                 /* Already symmetric */
1215                 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
1216                         continue;
1217                 /* Unplugged, no need to switch */
1218                 if (up->sw->is_unplugged)
1219                         continue;
1220
1221                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1222                                                &consumed_up, &consumed_down);
1223                 if (ret)
1224                         break;
1225
1226                 if (downstream) {
1227                         /*
1228                          * Downstream, so we want consumed_down < threshold.
1229                          * Upstream traffic should be less than 36G (40G minus
1230                          * the 10% guard band) as the link was already configured
1231                          * asymmetric.
1232                          */
1233                         if (consumed_down + requested_down >= asym_threshold)
1234                                 continue;
1235                 } else {
1236                         if (consumed_up + requested_up >= asym_threshold)
1237                                 continue;
1238                 }
1239
1240                 if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
1241                         continue;
1242
1243                 /*
1244                  * Here consumed < threshold so we can transition the
1245                  * link to symmetric.
1246                  *
1247                  * However, if the router prefers an asymmetric link we
1248                  * honor that (unless @keep_asym is %false).
1249                  */
1250                 if (keep_asym &&
1251                     up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
1252                         tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
1253                         continue;
1254                 }
1255
1256                 /* Disable CL states before doing any transitions */
1257                 if (!clx_disabled) {
1258                         clx = tb_disable_clx(sw);
1259                         clx_disabled = true;
1260                 }
1261
1262                 tb_sw_dbg(up->sw, "configuring symmetric link\n");
1263
1264                 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
1265                 if (ret) {
1266                         tb_sw_warn(up->sw, "failed to set link width\n");
1267                         break;
1268                 }
1269         }
1270
1271         /* Re-enable CL states if they were previously enabled */
1272         if (clx)
1273                 tb_enable_clx(sw);
1274
1275         return ret;
1276 }
1277
1278 static void tb_configure_link(struct tb_port *down, struct tb_port *up,
1279                               struct tb_switch *sw)
1280 {
1281         struct tb *tb = sw->tb;
1282
1283         /* Link the routers using both links if available */
1284         down->remote = up;
1285         up->remote = down;
1286         if (down->dual_link_port && up->dual_link_port) {
1287                 down->dual_link_port->remote = up->dual_link_port;
1288                 up->dual_link_port->remote = down->dual_link_port;
1289         }
1290
1291         /*
1292          * Enable lane bonding if the link is currently two single lane
1293          * links.
1294          */
1295         if (sw->link_width < TB_LINK_WIDTH_DUAL)
1296                 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
1297
1298         /*
1299          * If a device router comes up with a symmetric link deeper in
1300          * the hierarchy, we transition the links above it into
1301          * symmetric as well, provided bandwidth allows.
1302          */
1303         if (tb_switch_depth(sw) > 1 &&
1304             tb_port_get_link_generation(up) >= 4 &&
1305             up->sw->link_width == TB_LINK_WIDTH_DUAL) {
1306                 struct tb_port *host_port;
1307
1308                 host_port = tb_port_at(tb_route(sw), tb->root_switch);
1309                 tb_configure_sym(tb, host_port, up, 0, 0, false);
1310         }
1311
1312         /* Set the link configured */
1313         tb_switch_configure_link(sw);
1314 }
1315
1316 static void tb_scan_port(struct tb_port *port);
1317
1318 /*
1319  * tb_scan_switch() - scan for and initialize downstream switches
1320  */
1321 static void tb_scan_switch(struct tb_switch *sw)
1322 {
1323         struct tb_port *port;
1324
1325         pm_runtime_get_sync(&sw->dev);
1326
1327         tb_switch_for_each_port(sw, port)
1328                 tb_scan_port(port);
1329
1330         pm_runtime_mark_last_busy(&sw->dev);
1331         pm_runtime_put_autosuspend(&sw->dev);
1332 }
1333
1334 /*
1335  * tb_scan_port() - check for and initialize switches below port
1336  */
1337 static void tb_scan_port(struct tb_port *port)
1338 {
1339         struct tb_cm *tcm = tb_priv(port->sw->tb);
1340         struct tb_port *upstream_port;
1341         bool discovery = false;
1342         struct tb_switch *sw;
1343
1344         if (tb_is_upstream_port(port))
1345                 return;
1346
1347         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
1348             !tb_dp_port_is_enabled(port)) {
1349                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
1350                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
1351                                  false);
1352                 return;
1353         }
1354
1355         if (port->config.type != TB_TYPE_PORT)
1356                 return;
1357         if (port->dual_link_port && port->link_nr)
1358                 return; /*
1359                          * Downstream switch is reachable through two ports.
1360                          * Only scan on the primary port (link_nr == 0).
1361                          */
1362
1363         if (port->usb4)
1364                 pm_runtime_get_sync(&port->usb4->dev);
1365
1366         if (tb_wait_for_port(port, false) <= 0)
1367                 goto out_rpm_put;
1368         if (port->remote) {
1369                 tb_port_dbg(port, "port already has a remote\n");
1370                 goto out_rpm_put;
1371         }
1372
1373         tb_retimer_scan(port, true);
1374
1375         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
1376                              tb_downstream_route(port));
1377         if (IS_ERR(sw)) {
1378                 /*
1379                  * If there is an error accessing the connected switch
1380                  * it may be connected to another domain. Also we allow
1381                  * the other domain to be connected to a max depth switch.
1382                  */
1383                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
1384                         tb_scan_xdomain(port);
1385                 goto out_rpm_put;
1386         }
1387
1388         if (tb_switch_configure(sw)) {
1389                 tb_switch_put(sw);
1390                 goto out_rpm_put;
1391         }
1392
1393         /*
1394          * If there was previously another domain connected remove it
1395          * first.
1396          */
1397         if (port->xdomain) {
1398                 tb_xdomain_remove(port->xdomain);
1399                 tb_port_unconfigure_xdomain(port);
1400                 port->xdomain = NULL;
1401         }
1402
1403         /*
1404          * Do not send uevents until we have discovered all existing
1405          * tunnels and know which switches were authorized already by
1406          * the boot firmware.
1407          */
1408         if (!tcm->hotplug_active) {
1409                 dev_set_uevent_suppress(&sw->dev, true);
1410                 discovery = true;
1411         }
1412
1413         /*
1414          * At the moment we support runtime PM only for Thunderbolt 2
1415          * and beyond (devices with LC).
1416          */
1417         sw->rpm = sw->generation > 1;
1418
1419         if (tb_switch_add(sw)) {
1420                 tb_switch_put(sw);
1421                 goto out_rpm_put;
1422         }
1423
1424         upstream_port = tb_upstream_port(sw);
1425         tb_configure_link(port, upstream_port, sw);
1426
1427         /*
1428          * CL0s and CL1 are enabled and supported together.
1429          * Silently ignore CLx enabling in case CLx is not supported.
1430          */
1431         if (discovery)
1432                 tb_sw_dbg(sw, "discovery, not touching CL states\n");
1433         else if (tb_enable_clx(sw))
1434                 tb_sw_warn(sw, "failed to enable CL states\n");
1435
1436         if (tb_enable_tmu(sw))
1437                 tb_sw_warn(sw, "failed to enable TMU\n");
1438
1439         /*
1440          * Configuration valid needs to be set after the TMU has been
1441          * enabled for the upstream port of the router so we do it here.
1442          */
1443         tb_switch_configuration_valid(sw);
1444
1445         /* Scan upstream retimers */
1446         tb_retimer_scan(upstream_port, true);
1447
1448         /*
1449          * Create USB 3.x tunnels only when the switch is plugged into the
1450          * domain. This is because we also scan the domain during discovery
1451          * and want to discover existing USB 3.x tunnels before we create
1452          * any new ones.
1453          */
1454         if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1455                 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
1456
1457         tb_add_dp_resources(sw);
1458         tb_scan_switch(sw);
1459
1460 out_rpm_put:
1461         if (port->usb4) {
1462                 pm_runtime_mark_last_busy(&port->usb4->dev);
1463                 pm_runtime_put_autosuspend(&port->usb4->dev);
1464         }
1465 }
1466
1467 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
1468 {
1469         struct tb_port *src_port, *dst_port;
1470         struct tb *tb;
1471
1472         if (!tunnel)
1473                 return;
1474
1475         tb_tunnel_deactivate(tunnel);
1476         list_del(&tunnel->list);
1477
1478         tb = tunnel->tb;
1479         src_port = tunnel->src_port;
1480         dst_port = tunnel->dst_port;
1481
1482         switch (tunnel->type) {
1483         case TB_TUNNEL_DP:
1484                 tb_detach_bandwidth_group(src_port);
1485                 /*
1486                  * In case of DP tunnel make sure the DP IN resource is
1487                  * deallocated properly.
1488                  */
1489                 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
1490                 /*
1491                  * If bandwidth on a link is < asym_threshold
1492                  * transition the link to symmetric.
1493                  */
1494                 tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
1495                 /* Now we can allow the domain to runtime suspend again */
1496                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
1497                 pm_runtime_put_autosuspend(&dst_port->sw->dev);
1498                 pm_runtime_mark_last_busy(&src_port->sw->dev);
1499                 pm_runtime_put_autosuspend(&src_port->sw->dev);
1500                 fallthrough;
1501
1502         case TB_TUNNEL_USB3:
1503                 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1504                 break;
1505
1506         default:
1507                 /*
1508                  * PCIe and DMA tunnels do not consume guaranteed
1509                  * bandwidth.
1510                  */
1511                 break;
1512         }
1513
1514         tb_tunnel_free(tunnel);
1515 }
1516
1517 /*
1518  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
1519  */
1520 static void tb_free_invalid_tunnels(struct tb *tb)
1521 {
1522         struct tb_cm *tcm = tb_priv(tb);
1523         struct tb_tunnel *tunnel;
1524         struct tb_tunnel *n;
1525
1526         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1527                 if (tb_tunnel_is_invalid(tunnel))
1528                         tb_deactivate_and_free_tunnel(tunnel);
1529         }
1530 }
1531
1532 /*
1533  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
1534  */
1535 static void tb_free_unplugged_children(struct tb_switch *sw)
1536 {
1537         struct tb_port *port;
1538
1539         tb_switch_for_each_port(sw, port) {
1540                 if (!tb_port_has_remote(port))
1541                         continue;
1542
1543                 if (port->remote->sw->is_unplugged) {
1544                         tb_retimer_remove_all(port);
1545                         tb_remove_dp_resources(port->remote->sw);
1546                         tb_switch_unconfigure_link(port->remote->sw);
1547                         tb_switch_set_link_width(port->remote->sw,
1548                                                  TB_LINK_WIDTH_SINGLE);
1549                         tb_switch_remove(port->remote->sw);
1550                         port->remote = NULL;
1551                         if (port->dual_link_port)
1552                                 port->dual_link_port->remote = NULL;
1553                 } else {
1554                         tb_free_unplugged_children(port->remote->sw);
1555                 }
1556         }
1557 }
1558
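/*
 * tb_find_pcie_down() - find the PCIe downstream adapter to tunnel through
 *
 * For USB4 routers the mapping from @port to the PCIe downstream adapter
 * comes from the corresponding router operation. For legacy Thunderbolt
 * host routers a hard-coded per-controller mapping is used instead. If the
 * mapped adapter is not usable (or there is no mapping), fall back to any
 * unused PCIe downstream adapter of @sw.
 */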
1559 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
1560                                          const struct tb_port *port)
1561 {
1562         struct tb_port *down = NULL;
1563
1564         /*
1565          * To keep plugging devices consistently in the same PCIe
1566          * hierarchy, do mapping here for switch downstream PCIe ports.
1567          */
1568         if (tb_switch_is_usb4(sw)) {
1569                 down = usb4_switch_map_pcie_down(sw, port);
1570         } else if (!tb_route(sw)) {
1571                 int phy_port = tb_phy_port_from_link(port->port);
1572                 int index;
1573
1574                 /*
1575                  * Hard-coded Thunderbolt port to PCIe down port mapping
1576                  * per controller.
1577                  */
1578                 if (tb_switch_is_cactus_ridge(sw) ||
1579                     tb_switch_is_alpine_ridge(sw))
1580                         index = !phy_port ? 6 : 7;
1581                 else if (tb_switch_is_falcon_ridge(sw))
1582                         index = !phy_port ? 6 : 8;
1583                 else if (tb_switch_is_titan_ridge(sw))
1584                         index = !phy_port ? 8 : 9;
1585                 else
1586                         goto out;
1587
1588                 /* Validate the hard-coding */
1589                 if (WARN_ON(index > sw->config.max_port_number))
1590                         goto out;
1591
1592                 down = &sw->ports[index];
1593         }
1594
1595         if (down) {
1596                 if (WARN_ON(!tb_port_is_pcie_down(down)))
1597                         goto out;
1598                 if (tb_pci_port_is_enabled(down))
1599                         goto out;
1600
1601                 return down;
1602         }
1603
1604 out:
1605         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1606 }
1607
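/*
 * Re-calculate the estimated bandwidth reported to each DP IN adapter of
 * @group that has the bandwidth allocation mode enabled. Unused USB3
 * bandwidth is released once for the whole group while doing this and
 * reclaimed again at the end.
 */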
1608 static void
1609 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1610 {
1611         struct tb_tunnel *first_tunnel;
1612         struct tb *tb = group->tb;
1613         struct tb_port *in;
1614         int ret;
1615
1616         tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1617                group->index);
1618
1619         first_tunnel = NULL;
1620         list_for_each_entry(in, &group->ports, group_list) {
1621                 int estimated_bw, estimated_up, estimated_down;
1622                 struct tb_tunnel *tunnel;
1623                 struct tb_port *out;
1624
1625                 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1626                         continue;
1627
1628                 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1629                 if (WARN_ON(!tunnel))
1630                         break;
1631
1632                 if (!first_tunnel) {
1633                         /*
1634                          * Since USB3 bandwidth is shared by all DP
1635                          * tunnels under the host router USB4 port, even
1636                          * if they do not begin from the host router, we
1637                          * can release USB3 bandwidth just once and not
1638                          * for each tunnel separately.
1639                          */
1640                         first_tunnel = tunnel;
1641                         ret = tb_release_unused_usb3_bandwidth(tb,
1642                                 first_tunnel->src_port, first_tunnel->dst_port);
1643                         if (ret) {
1644                                 tb_tunnel_warn(tunnel,
1645                                         "failed to release unused bandwidth\n");
1646                                 break;
1647                         }
1648                 }
1649
1650                 out = tunnel->dst_port;
1651                 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1652                                              &estimated_down, true);
1653                 if (ret) {
1654                         tb_tunnel_warn(tunnel,
1655                                 "failed to re-calculate estimated bandwidth\n");
1656                         break;
1657                 }
1658
1659                 /*
1660                  * Estimated bandwidth includes:
1661                  *  - already allocated bandwidth for the DP tunnel
1662                  *  - available bandwidth along the path
1663                  *  - bandwidth allocated for USB 3.x but not used.
1664                  */
1665                 tb_tunnel_dbg(tunnel,
1666                               "re-calculated estimated bandwidth %u/%u Mb/s\n",
1667                               estimated_up, estimated_down);
1668
1669                 if (tb_port_path_direction_downstream(in, out))
1670                         estimated_bw = estimated_down;
1671                 else
1672                         estimated_bw = estimated_up;
1673
1674                 if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
1675                         tb_tunnel_warn(tunnel,
1676                                        "failed to update estimated bandwidth\n");
1677         }
1678
1679         if (first_tunnel)
1680                 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1681                                           first_tunnel->dst_port);
1682
1683         tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1684 }
1685
1686 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1687 {
1688         struct tb_cm *tcm = tb_priv(tb);
1689         int i;
1690
1691         tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1692
1693         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1694                 struct tb_bandwidth_group *group = &tcm->groups[i];
1695
1696                 if (!list_empty(&group->ports))
1697                         tb_recalc_estimated_bandwidth_for_group(group);
1698         }
1699
1700         tb_dbg(tb, "bandwidth re-calculation done\n");
1701 }
1702
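/*
 * Find an inactive DP OUT adapter that can be paired with @in. DP OUT
 * adapters whose tunnel would go through a different host router
 * downstream port than @in are skipped so that the tunnel stays within
 * one branch of the topology.
 */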
1703 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1704 {
1705         struct tb_port *host_port, *port;
1706         struct tb_cm *tcm = tb_priv(tb);
1707
1708         host_port = tb_route(in->sw) ?
1709                 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1710
1711         list_for_each_entry(port, &tcm->dp_resources, list) {
1712                 if (!tb_port_is_dpout(port))
1713                         continue;
1714
1715                 if (tb_port_is_enabled(port)) {
1716                         tb_port_dbg(port, "DP OUT in use\n");
1717                         continue;
1718                 }
1719
1720                 tb_port_dbg(port, "DP OUT available\n");
1721
1722                 /*
1723                  * Keep the DP tunnel under the topology starting from
1724                  * the same host router downstream port.
1725                  */
1726                 if (host_port && tb_route(port->sw)) {
1727                         struct tb_port *p;
1728
1729                         p = tb_port_at(tb_route(port->sw), tb->root_switch);
1730                         if (p != host_port)
1731                                 continue;
1732                 }
1733
1734                 return port;
1735         }
1736
1737         return NULL;
1738 }
1739
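/*
 * Find one inactive DP IN/DP OUT pair and establish a DP tunnel between
 * them. Returns true if a new tunnel was created so the caller can keep
 * calling until all available pairs are tunneled.
 */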
1740 static bool tb_tunnel_one_dp(struct tb *tb)
1741 {
1742         int available_up, available_down, ret, link_nr;
1743         struct tb_cm *tcm = tb_priv(tb);
1744         struct tb_port *port, *in, *out;
1745         int consumed_up, consumed_down;
1746         struct tb_tunnel *tunnel;
1747
1748         /*
1749          * Find a pair of inactive DP IN and DP OUT adapters and then
1750          * establish a DP tunnel between them.
1751          */
1752         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1753
1754         in = NULL;
1755         out = NULL;
1756         list_for_each_entry(port, &tcm->dp_resources, list) {
1757                 if (!tb_port_is_dpin(port))
1758                         continue;
1759
1760                 if (tb_port_is_enabled(port)) {
1761                         tb_port_dbg(port, "DP IN in use\n");
1762                         continue;
1763                 }
1764
1765                 in = port;
1766                 tb_port_dbg(in, "DP IN available\n");
1767
1768                 out = tb_find_dp_out(tb, port);
1769                 if (out)
1770                         break;
1771         }
1772
1773         if (!in) {
1774                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1775                 return false;
1776         }
1777         if (!out) {
1778                 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
1779                 return false;
1780         }
1781
1782         /*
1783          * This is only applicable to links that are not bonded (so
1784          * when Thunderbolt 1 hardware is involved somewhere in the
1785          * topology). For these try to share the DP bandwidth between
1786          * the two lanes.
1787          */
1788         link_nr = 1;
1789         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1790                 if (tb_tunnel_is_dp(tunnel)) {
1791                         link_nr = 0;
1792                         break;
1793                 }
1794         }
1795
1796         /*
1797          * The DP stream needs the domain to be active so runtime resume
1798          * both ends of the tunnel.
1799          *
1800          * This should also bring up the routers in the middle and keep
1801          * the domain from runtime suspending while the DP tunnel is
1802          * active.
1803          */
1804         pm_runtime_get_sync(&in->sw->dev);
1805         pm_runtime_get_sync(&out->sw->dev);
1806
1807         if (tb_switch_alloc_dp_resource(in->sw, in)) {
1808                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
1809                 goto err_rpm_put;
1810         }
1811
1812         if (!tb_attach_bandwidth_group(tcm, in, out))
1813                 goto err_dealloc_dp;
1814
1815         /* Make all unused USB3 bandwidth available for the new DP tunnel */
1816         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1817         if (ret) {
1818                 tb_warn(tb, "failed to release unused bandwidth\n");
1819                 goto err_detach_group;
1820         }
1821
1822         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
1823                                      true);
1824         if (ret)
1825                 goto err_reclaim_usb;
1826
1827         tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1828                available_up, available_down);
1829
1830         tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1831                                     available_down);
1832         if (!tunnel) {
1833                 tb_port_dbg(out, "could not allocate DP tunnel\n");
1834                 goto err_reclaim_usb;
1835         }
1836
1837         if (tb_tunnel_activate(tunnel)) {
1838                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1839                 goto err_free;
1840         }
1841
1842         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1843         tb_reclaim_usb3_bandwidth(tb, in, out);
1844
1845         /*
1846          * Transition the links to asymmetric if the consumption exceeds
1847          * the threshold.
1848          */
1849         if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
1850                 tb_configure_asym(tb, in, out, consumed_up, consumed_down);
1851
1852         /* Update the domain with the new bandwidth estimation */
1853         tb_recalc_estimated_bandwidth(tb);
1854
1855         /*
1856          * If a DP tunnel exists, change the TMU mode of the host router's
1857          * first depth children to HiFi so that CL0s works.
1858          */
1859         tb_increase_tmu_accuracy(tunnel);
1860         return true;
1861
1862 err_free:
1863         tb_tunnel_free(tunnel);
1864 err_reclaim_usb:
1865         tb_reclaim_usb3_bandwidth(tb, in, out);
1866 err_detach_group:
1867         tb_detach_bandwidth_group(in);
1868 err_dealloc_dp:
1869         tb_switch_dealloc_dp_resource(in->sw, in);
1870 err_rpm_put:
1871         pm_runtime_mark_last_busy(&out->sw->dev);
1872         pm_runtime_put_autosuspend(&out->sw->dev);
1873         pm_runtime_mark_last_busy(&in->sw->dev);
1874         pm_runtime_put_autosuspend(&in->sw->dev);
1875
1876         return false;
1877 }
1878
1879 static void tb_tunnel_dp(struct tb *tb)
1880 {
1881         if (!tb_acpi_may_tunnel_dp()) {
1882                 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1883                 return;
1884         }
1885
1886         while (tb_tunnel_one_dp(tb))
1887                 ;
1888 }
1889
1890 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
1891 {
1892         struct tb_port *in, *out;
1893         struct tb_tunnel *tunnel;
1894
1895         if (tb_port_is_dpin(port)) {
1896                 tb_port_dbg(port, "DP IN resource unavailable\n");
1897                 in = port;
1898                 out = NULL;
1899         } else {
1900                 tb_port_dbg(port, "DP OUT resource unavailable\n");
1901                 in = NULL;
1902                 out = port;
1903         }
1904
1905         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1906         tb_deactivate_and_free_tunnel(tunnel);
1907         list_del_init(&port->list);
1908
1909         /*
1910          * See if there is another DP OUT port that can be used to
1911          * create another tunnel.
1912          */
1913         tb_recalc_estimated_bandwidth(tb);
1914         tb_tunnel_dp(tb);
1915 }
1916
1917 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1918 {
1919         struct tb_cm *tcm = tb_priv(tb);
1920         struct tb_port *p;
1921
1922         if (tb_port_is_enabled(port))
1923                 return;
1924
1925         list_for_each_entry(p, &tcm->dp_resources, list) {
1926                 if (p == port)
1927                         return;
1928         }
1929
1930         tb_port_dbg(port, "DP %s resource available after hotplug\n",
1931                     tb_port_is_dpin(port) ? "IN" : "OUT");
1932         list_add_tail(&port->list, &tcm->dp_resources);
1933
1934         /* Look for suitable DP IN <-> DP OUT pairs now */
1935         tb_tunnel_dp(tb);
1936 }
1937
1938 static void tb_disconnect_and_release_dp(struct tb *tb)
1939 {
1940         struct tb_cm *tcm = tb_priv(tb);
1941         struct tb_tunnel *tunnel, *n;
1942
1943         /*
1944          * Tear down all DP tunnels and release their resources. They
1945          * will be re-established after resume based on plug events.
1946          */
1947         list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1948                 if (tb_tunnel_is_dp(tunnel))
1949                         tb_deactivate_and_free_tunnel(tunnel);
1950         }
1951
1952         while (!list_empty(&tcm->dp_resources)) {
1953                 struct tb_port *port;
1954
1955                 port = list_first_entry(&tcm->dp_resources,
1956                                         struct tb_port, list);
1957                 list_del_init(&port->list);
1958         }
1959 }
1960
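/*
 * Tear down the PCIe tunnel going to @sw and disconnect its xHCI. This
 * backs the disapprove_switch domain operation.
 */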
1961 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1962 {
1963         struct tb_tunnel *tunnel;
1964         struct tb_port *up;
1965
1966         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1967         if (WARN_ON(!up))
1968                 return -ENODEV;
1969
1970         tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1971         if (WARN_ON(!tunnel))
1972                 return -ENODEV;
1973
1974         tb_switch_xhci_disconnect(sw);
1975
1976         tb_tunnel_deactivate(tunnel);
1977         list_del(&tunnel->list);
1978         tb_tunnel_free(tunnel);
1979         return 0;
1980 }
1981
1982 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1983 {
1984         struct tb_port *up, *down, *port;
1985         struct tb_cm *tcm = tb_priv(tb);
1986         struct tb_tunnel *tunnel;
1987
1988         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1989         if (!up)
1990                 return 0;
1991
1992         /*
1993          * Look up an available down port. Since we are chaining, it
1994          * should be found right above this switch.
1995          */
1996         port = tb_switch_downstream_port(sw);
1997         down = tb_find_pcie_down(tb_switch_parent(sw), port);
1998         if (!down)
1999                 return 0;
2000
2001         tunnel = tb_tunnel_alloc_pci(tb, up, down);
2002         if (!tunnel)
2003                 return -ENOMEM;
2004
2005         if (tb_tunnel_activate(tunnel)) {
2006                 tb_port_info(up,
2007                              "PCIe tunnel activation failed, aborting\n");
2008                 tb_tunnel_free(tunnel);
2009                 return -EIO;
2010         }
2011
2012         /*
2013          * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
2014          * here.
2015          */
2016         if (tb_switch_pcie_l1_enable(sw))
2017                 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
2018
2019         if (tb_switch_xhci_connect(sw))
2020                 tb_sw_warn(sw, "failed to connect xHCI\n");
2021
2022         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2023         return 0;
2024 }
2025
2026 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2027                                     int transmit_path, int transmit_ring,
2028                                     int receive_path, int receive_ring)
2029 {
2030         struct tb_cm *tcm = tb_priv(tb);
2031         struct tb_port *nhi_port, *dst_port;
2032         struct tb_tunnel *tunnel;
2033         struct tb_switch *sw;
2034         int ret;
2035
2036         sw = tb_to_switch(xd->dev.parent);
2037         dst_port = tb_port_at(xd->route, sw);
2038         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2039
2040         mutex_lock(&tb->lock);
2041
2042         /*
2043          * When tunneling DMA paths the link should not enter CL states
2044          * so disable them now.
2045          */
2046         tb_disable_clx(sw);
2047
2048         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
2049                                      transmit_ring, receive_path, receive_ring);
2050         if (!tunnel) {
2051                 ret = -ENOMEM;
2052                 goto err_clx;
2053         }
2054
2055         if (tb_tunnel_activate(tunnel)) {
2056                 tb_port_info(nhi_port,
2057                              "DMA tunnel activation failed, aborting\n");
2058                 ret = -EIO;
2059                 goto err_free;
2060         }
2061
2062         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2063         mutex_unlock(&tb->lock);
2064         return 0;
2065
2066 err_free:
2067         tb_tunnel_free(tunnel);
2068 err_clx:
2069         tb_enable_clx(sw);
2070         mutex_unlock(&tb->lock);
2071
2072         return ret;
2073 }
2074
2075 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2076                                           int transmit_path, int transmit_ring,
2077                                           int receive_path, int receive_ring)
2078 {
2079         struct tb_cm *tcm = tb_priv(tb);
2080         struct tb_port *nhi_port, *dst_port;
2081         struct tb_tunnel *tunnel, *n;
2082         struct tb_switch *sw;
2083
2084         sw = tb_to_switch(xd->dev.parent);
2085         dst_port = tb_port_at(xd->route, sw);
2086         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2087
2088         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2089                 if (!tb_tunnel_is_dma(tunnel))
2090                         continue;
2091                 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
2092                         continue;
2093
2094                 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
2095                                         receive_path, receive_ring))
2096                         tb_deactivate_and_free_tunnel(tunnel);
2097         }
2098
2099         /*
2100          * Try to re-enable CL states now. It is OK if this fails
2101          * because we may still have another DMA tunnel active through
2102          * the same host router USB4 downstream port.
2103          */
2104         tb_enable_clx(sw);
2105 }
2106
2107 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2108                                        int transmit_path, int transmit_ring,
2109                                        int receive_path, int receive_ring)
2110 {
2111         if (!xd->is_unplugged) {
2112                 mutex_lock(&tb->lock);
2113                 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
2114                                               transmit_ring, receive_path,
2115                                               receive_ring);
2116                 mutex_unlock(&tb->lock);
2117         }
2118         return 0;
2119 }
2120
2121 /* hotplug handling */
2122
2123 /*
2124  * tb_handle_hotplug() - handle hotplug event
2125  *
2126  * Executes on tb->wq.
2127  */
2128 static void tb_handle_hotplug(struct work_struct *work)
2129 {
2130         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2131         struct tb *tb = ev->tb;
2132         struct tb_cm *tcm = tb_priv(tb);
2133         struct tb_switch *sw;
2134         struct tb_port *port;
2135
2136         /* Bring the domain back from sleep if it was suspended */
2137         pm_runtime_get_sync(&tb->dev);
2138
2139         mutex_lock(&tb->lock);
2140         if (!tcm->hotplug_active)
2141                 goto out; /* during init, suspend or shutdown */
2142
2143         sw = tb_switch_find_by_route(tb, ev->route);
2144         if (!sw) {
2145                 tb_warn(tb,
2146                         "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
2147                         ev->route, ev->port, ev->unplug);
2148                 goto out;
2149         }
2150         if (ev->port > sw->config.max_port_number) {
2151                 tb_warn(tb,
2152                         "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
2153                         ev->route, ev->port, ev->unplug);
2154                 goto put_sw;
2155         }
2156         port = &sw->ports[ev->port];
2157         if (tb_is_upstream_port(port)) {
2158                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
2159                        ev->route, ev->port, ev->unplug);
2160                 goto put_sw;
2161         }
2162
2163         pm_runtime_get_sync(&sw->dev);
2164
2165         if (ev->unplug) {
2166                 tb_retimer_remove_all(port);
2167
2168                 if (tb_port_has_remote(port)) {
2169                         tb_port_dbg(port, "switch unplugged\n");
2170                         tb_sw_set_unplugged(port->remote->sw);
2171                         tb_free_invalid_tunnels(tb);
2172                         tb_remove_dp_resources(port->remote->sw);
2173                         tb_switch_tmu_disable(port->remote->sw);
2174                         tb_switch_unconfigure_link(port->remote->sw);
2175                         tb_switch_set_link_width(port->remote->sw,
2176                                                  TB_LINK_WIDTH_SINGLE);
2177                         tb_switch_remove(port->remote->sw);
2178                         port->remote = NULL;
2179                         if (port->dual_link_port)
2180                                 port->dual_link_port->remote = NULL;
2181                         /* Maybe we can create another DP tunnel */
2182                         tb_recalc_estimated_bandwidth(tb);
2183                         tb_tunnel_dp(tb);
2184                 } else if (port->xdomain) {
2185                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
2186
2187                         tb_port_dbg(port, "xdomain unplugged\n");
2188                         /*
2189                          * Service drivers are unbound during
2190                          * tb_xdomain_remove() so setting XDomain as
2191                          * unplugged here prevents deadlock if they call
2192                          * tb_xdomain_disable_paths(). We will tear down
2193                          * all the tunnels below.
2194                          */
2195                         xd->is_unplugged = true;
2196                         tb_xdomain_remove(xd);
2197                         port->xdomain = NULL;
2198                         __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
2199                         tb_xdomain_put(xd);
2200                         tb_port_unconfigure_xdomain(port);
2201                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2202                         tb_dp_resource_unavailable(tb, port);
2203                 } else if (!port->port) {
2204                         tb_sw_dbg(sw, "xHCI disconnect request\n");
2205                         tb_switch_xhci_disconnect(sw);
2206                 } else {
2207                         tb_port_dbg(port,
2208                                    "got unplug event for disconnected port, ignoring\n");
2209                 }
2210         } else if (port->remote) {
2211                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
2212         } else if (!port->port && sw->authorized) {
2213                 tb_sw_dbg(sw, "xHCI connect request\n");
2214                 tb_switch_xhci_connect(sw);
2215         } else {
2216                 if (tb_port_is_null(port)) {
2217                         tb_port_dbg(port, "hotplug: scanning\n");
2218                         tb_scan_port(port);
2219                         if (!port->remote)
2220                                 tb_port_dbg(port, "hotplug: no switch found\n");
2221                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2222                         tb_dp_resource_available(tb, port);
2223                 }
2224         }
2225
2226         pm_runtime_mark_last_busy(&sw->dev);
2227         pm_runtime_put_autosuspend(&sw->dev);
2228
2229 put_sw:
2230         tb_switch_put(sw);
2231 out:
2232         mutex_unlock(&tb->lock);
2233
2234         pm_runtime_mark_last_busy(&tb->dev);
2235         pm_runtime_put_autosuspend(&tb->dev);
2236
2237         kfree(ev);
2238 }
2239
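/*
 * Handle a bandwidth allocation request coming from the DP IN adapter of
 * an existing DP tunnel. The request is checked against the tunnel
 * maximum and the currently available bandwidth, and the link is
 * transitioned between symmetric and asymmetric as needed. Returns
 * -ENOBUFS if there is not enough bandwidth.
 */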
2240 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
2241                                  int *requested_down)
2242 {
2243         int allocated_up, allocated_down, available_up, available_down, ret;
2244         int requested_up_corrected, requested_down_corrected, granularity;
2245         int max_up, max_down, max_up_rounded, max_down_rounded;
2246         struct tb *tb = tunnel->tb;
2247         struct tb_port *in, *out;
2248
2249         ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
2250         if (ret)
2251                 return ret;
2252
2253         in = tunnel->src_port;
2254         out = tunnel->dst_port;
2255
2256         tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
2257                       allocated_up, allocated_down);
2258
2259         /*
2260          * If we get a rounded-up request from the graphics side, say HBR2 x 4
2261          * that is 17500 instead of 17280 (this is because of the
2262          * granularity), we allow it too. Here the graphics side has already
2263          * negotiated with the DPRX the maximum possible rates (which is
2264          * 17280 in this case).
2265          *
2266          * Since the link cannot go higher than 17280 we use that in our
2267          * calculations, but the DP IN adapter Allocated BW write must be
2268          * the same value (17500), otherwise the adapter will mark it as
2269          * failed for graphics.
2270          */
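        /*
         * Illustrative arithmetic for the example above: HBR2 x 4 is
         * 4 lanes x 5.4 Gb/s with 8b/10b encoding, i.e. 4 * 5400 * 8 / 10
         * = 17280 Mb/s of usable bandwidth. With a granularity of for
         * example 250 Mb/s, roundup(17280, 250) = 17500 Mb/s, which is
         * what the graphics side requests and what must be written back
         * to the DP IN adapter.
         */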
2271         ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
2272         if (ret)
2273                 return ret;
2274
2275         ret = usb4_dp_port_granularity(in);
2276         if (ret < 0)
2277                 return ret;
2278         granularity = ret;
2279
2280         max_up_rounded = roundup(max_up, granularity);
2281         max_down_rounded = roundup(max_down, granularity);
2282
2283         /*
2284          * This will "fix" the request down to the maximum supported
2285          * rate * lanes if it is at the maximum rounded up level.
2286          */
2287         requested_up_corrected = *requested_up;
2288         if (requested_up_corrected == max_up_rounded)
2289                 requested_up_corrected = max_up;
2290         else if (requested_up_corrected < 0)
2291                 requested_up_corrected = 0;
2292         requested_down_corrected = *requested_down;
2293         if (requested_down_corrected == max_down_rounded)
2294                 requested_down_corrected = max_down;
2295         else if (requested_down_corrected < 0)
2296                 requested_down_corrected = 0;
2297
2298         tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
2299                       requested_up_corrected, requested_down_corrected);
2300
2301         if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
2302             (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
2303                 tb_tunnel_dbg(tunnel,
2304                               "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
2305                               requested_up_corrected, requested_down_corrected,
2306                               max_up_rounded, max_down_rounded);
2307                 return -ENOBUFS;
2308         }
2309
2310         if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
2311             (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
2312                 /*
2313                  * If bandwidth on a link is < asym_threshold transition
2314                  * the link to symmetric.
2315                  */
2316                 tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
2317                 /*
2318                  * If the requested bandwidth is less than or equal to what
2319                  * is currently allocated to that tunnel we simply change
2320                  * the reservation of the tunnel. Since all the tunnels
2321                  * going out from the same USB4 port are in the same
2322                  * group the released bandwidth will be taken into
2323                  * account for the other tunnels automatically below.
2324                  */
2325                 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2326                                                  requested_down);
2327         }
2328
2329         /*
2330          * More bandwidth is requested. Release all the potential
2331          * bandwidth from USB3 first.
2332          */
2333         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
2334         if (ret)
2335                 return ret;
2336
2337         /*
2338          * Then go over all tunnels that cross the same USB4 ports (they
2339          * are also in the same group but we use the same function here
2340          * that we use with the normal bandwidth allocation).
2341          */
2342         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
2343                                      true);
2344         if (ret)
2345                 goto reclaim;
2346
2347         tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
2348                       available_up, available_down);
2349
2350         if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
2351             (*requested_down >= 0 && available_down >= requested_down_corrected)) {
2352                 /*
2353                  * If bandwidth on a link is >= asym_threshold
2354                  * transition the link to asymmetric.
2355                  */
2356                 ret = tb_configure_asym(tb, in, out, *requested_up,
2357                                         *requested_down);
2358                 if (ret) {
2359                         tb_configure_sym(tb, in, out, 0, 0, true);
2360                         return ret;
2361                 }
2362
2363                 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2364                                                 requested_down);
2365                 if (ret) {
2366                         tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
2367                         tb_configure_sym(tb, in, out, 0, 0, true);
2368                 }
2369         } else {
2370                 ret = -ENOBUFS;
2371         }
2372
2373 reclaim:
2374         tb_reclaim_usb3_bandwidth(tb, in, out);
2375         return ret;
2376 }
2377
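/*
 * Worker executed on tb->wq that reads the requested bandwidth from the
 * DP IN adapter and tries to adjust the allocation of the matching DP
 * tunnel accordingly, re-calculating the estimated bandwidth for the
 * other group members if the allocation changed.
 */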
2378 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
2379 {
2380         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2381         int requested_bw, requested_up, requested_down, ret;
2382         struct tb_port *in, *out;
2383         struct tb_tunnel *tunnel;
2384         struct tb *tb = ev->tb;
2385         struct tb_cm *tcm = tb_priv(tb);
2386         struct tb_switch *sw;
2387
2388         pm_runtime_get_sync(&tb->dev);
2389
2390         mutex_lock(&tb->lock);
2391         if (!tcm->hotplug_active)
2392                 goto unlock;
2393
2394         sw = tb_switch_find_by_route(tb, ev->route);
2395         if (!sw) {
2396                 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
2397                         ev->route);
2398                 goto unlock;
2399         }
2400
2401         in = &sw->ports[ev->port];
2402         if (!tb_port_is_dpin(in)) {
2403                 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
2404                 goto put_sw;
2405         }
2406
2407         tb_port_dbg(in, "handling bandwidth allocation request\n");
2408
2409         if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
2410                 tb_port_warn(in, "bandwidth allocation mode not enabled\n");
2411                 goto put_sw;
2412         }
2413
2414         ret = usb4_dp_port_requested_bandwidth(in);
2415         if (ret < 0) {
2416                 if (ret == -ENODATA)
2417                         tb_port_dbg(in, "no bandwidth request active\n");
2418                 else
2419                         tb_port_warn(in, "failed to read requested bandwidth\n");
2420                 goto put_sw;
2421         }
2422         requested_bw = ret;
2423
2424         tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
2425
2426         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
2427         if (!tunnel) {
2428                 tb_port_warn(in, "failed to find tunnel\n");
2429                 goto put_sw;
2430         }
2431
2432         out = tunnel->dst_port;
2433
2434         if (tb_port_path_direction_downstream(in, out)) {
2435                 requested_up = -1;
2436                 requested_down = requested_bw;
2437         } else {
2438                 requested_up = requested_bw;
2439                 requested_down = -1;
2440         }
2441
2442         ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
2443         if (ret) {
2444                 if (ret == -ENOBUFS)
2445                         tb_tunnel_warn(tunnel,
2446                                        "not enough bandwidth available\n");
2447                 else
2448                         tb_tunnel_warn(tunnel,
2449                                        "failed to change bandwidth allocation\n");
2450         } else {
2451                 tb_tunnel_dbg(tunnel,
2452                               "bandwidth allocation changed to %d/%d Mb/s\n",
2453                               requested_up, requested_down);
2454
2455                 /* Update other clients about the allocation change */
2456                 tb_recalc_estimated_bandwidth(tb);
2457         }
2458
2459 put_sw:
2460         tb_switch_put(sw);
2461 unlock:
2462         mutex_unlock(&tb->lock);
2463
2464         pm_runtime_mark_last_busy(&tb->dev);
2465         pm_runtime_put_autosuspend(&tb->dev);
2466
2467         kfree(ev);
2468 }
2469
2470 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
2471 {
2472         struct tb_hotplug_event *ev;
2473
2474         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
2475         if (!ev)
2476                 return;
2477
2478         ev->tb = tb;
2479         ev->route = route;
2480         ev->port = port;
2481         INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
2482         queue_work(tb->wq, &ev->work);
2483 }
2484
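/*
 * Handle notifications sent by routers: acknowledge the ones that
 * require it and queue DP bandwidth allocation requests for processing
 * on tb->wq. Everything else is ignored for now.
 */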
2485 static void tb_handle_notification(struct tb *tb, u64 route,
2486                                    const struct cfg_error_pkg *error)
2487 {
2488
2489         switch (error->error) {
2490         case TB_CFG_ERROR_PCIE_WAKE:
2491         case TB_CFG_ERROR_DP_CON_CHANGE:
2492         case TB_CFG_ERROR_DPTX_DISCOVERY:
2493                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2494                         tb_warn(tb, "could not ack notification on %llx\n",
2495                                 route);
2496                 break;
2497
2498         case TB_CFG_ERROR_DP_BW:
2499                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2500                         tb_warn(tb, "could not ack notification on %llx\n",
2501                                 route);
2502                 tb_queue_dp_bandwidth_request(tb, route, error->port);
2503                 break;
2504
2505         default:
2506                 /* Ignore for now */
2507                 break;
2508         }
2509 }
2510
2511 /*
2512  * tb_handle_event() - callback function for the control channel
2513  *
2514  * Delegates to tb_handle_hotplug.
2515  */
2516 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
2517                             const void *buf, size_t size)
2518 {
2519         const struct cfg_event_pkg *pkg = buf;
2520         u64 route = tb_cfg_get_route(&pkg->header);
2521
2522         switch (type) {
2523         case TB_CFG_PKG_ERROR:
2524                 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
2525                 return;
2526         case TB_CFG_PKG_EVENT:
2527                 break;
2528         default:
2529                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
2530                 return;
2531         }
2532
2533         if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
2534                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
2535                         pkg->port);
2536         }
2537
2538         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2539 }
2540
2541 static void tb_stop(struct tb *tb)
2542 {
2543         struct tb_cm *tcm = tb_priv(tb);
2544         struct tb_tunnel *tunnel;
2545         struct tb_tunnel *n;
2546
2547         cancel_delayed_work(&tcm->remove_work);
2548         /* tunnels are only present after everything has been initialized */
2549         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2550                 /*
2551                  * DMA tunnels require the driver to be functional so we
2552                  * tear them down. Other protocol tunnels can be left
2553                  * intact.
2554                  */
2555                 if (tb_tunnel_is_dma(tunnel))
2556                         tb_tunnel_deactivate(tunnel);
2557                 tb_tunnel_free(tunnel);
2558         }
2559         tb_switch_remove(tb->root_switch);
2560         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2561 }
2562
2563 static int tb_scan_finalize_switch(struct device *dev, void *data)
2564 {
2565         if (tb_is_switch(dev)) {
2566                 struct tb_switch *sw = tb_to_switch(dev);
2567
2568                 /*
2569                  * If we found that the switch was already set up by the
2570                  * boot firmware, mark it as authorized now before we
2571                  * send the uevent to userspace.
2572                  */
2573                 if (sw->boot)
2574                         sw->authorized = 1;
2575
2576                 dev_set_uevent_suppress(dev, false);
2577                 kobject_uevent(&dev->kobj, KOBJ_ADD);
2578                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
2579         }
2580
2581         return 0;
2582 }
2583
2584 static int tb_start(struct tb *tb)
2585 {
2586         struct tb_cm *tcm = tb_priv(tb);
2587         int ret;
2588
2589         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2590         if (IS_ERR(tb->root_switch))
2591                 return PTR_ERR(tb->root_switch);
2592
2593         /*
2594          * ICM firmware upgrade needs running ICM firmware, which is not
2595          * available in native mode, so disable firmware upgrade of the
2596          * root switch.
2597          *
2598          * However, USB4 routers support NVM firmware upgrade if they
2599          * implement the necessary router operations.
2600          */
2601         tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
2602         /* All USB4 routers support runtime PM */
2603         tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2604
2605         ret = tb_switch_configure(tb->root_switch);
2606         if (ret) {
2607                 tb_switch_put(tb->root_switch);
2608                 return ret;
2609         }
2610
2611         /* Announce the switch to the world */
2612         ret = tb_switch_add(tb->root_switch);
2613         if (ret) {
2614                 tb_switch_put(tb->root_switch);
2615                 return ret;
2616         }
2617
2618         /*
2619          * To support highest CLx state, we set host router's TMU to
2620          * Normal mode.
2621          */
2622         tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2623         /* Enable TMU if it is off */
2624         tb_switch_tmu_enable(tb->root_switch);
2625         /* Full scan to discover devices added before the driver was loaded. */
2626         tb_scan_switch(tb->root_switch);
2627         /* Find out tunnels created by the boot firmware */
2628         tb_discover_tunnels(tb);
2629         /* Add DP resources from the DP tunnels created by the boot firmware */
2630         tb_discover_dp_resources(tb);
2631         /*
2632          * If the boot firmware did not create USB 3.x tunnels create them
2633          * now for the whole topology.
2634          */
2635         tb_create_usb3_tunnels(tb->root_switch);
2636         /* Add DP IN resources for the root switch */
2637         tb_add_dp_resources(tb->root_switch);
2638         /* Make the discovered switches available to the userspace */
2639         device_for_each_child(&tb->root_switch->dev, NULL,
2640                               tb_scan_finalize_switch);
2641
2642         /* Allow tb_handle_hotplug to progress events */
2643         tcm->hotplug_active = true;
2644         return 0;
2645 }
2646
2647 static int tb_suspend_noirq(struct tb *tb)
2648 {
2649         struct tb_cm *tcm = tb_priv(tb);
2650
2651         tb_dbg(tb, "suspending...\n");
2652         tb_disconnect_and_release_dp(tb);
2653         tb_switch_suspend(tb->root_switch, false);
2654         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2655         tb_dbg(tb, "suspend finished\n");
2656
2657         return 0;
2658 }
2659
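/*
 * Re-enable CL states and TMU, and re-configure the links of @sw and all
 * routers and XDomains below it. Used when resuming the domain.
 */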
2660 static void tb_restore_children(struct tb_switch *sw)
2661 {
2662         struct tb_port *port;
2663
2664         /* No need to restore if the router is already unplugged */
2665         if (sw->is_unplugged)
2666                 return;
2667
2668         if (tb_enable_clx(sw))
2669                 tb_sw_warn(sw, "failed to re-enable CL states\n");
2670
2671         if (tb_enable_tmu(sw))
2672                 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2673
2674         tb_switch_configuration_valid(sw);
2675
2676         tb_switch_for_each_port(sw, port) {
2677                 if (!tb_port_has_remote(port) && !port->xdomain)
2678                         continue;
2679
2680                 if (port->remote) {
2681                         tb_switch_set_link_width(port->remote->sw,
2682                                                  port->remote->sw->link_width);
2683                         tb_switch_configure_link(port->remote->sw);
2684
2685                         tb_restore_children(port->remote->sw);
2686                 } else if (port->xdomain) {
2687                         tb_port_configure_xdomain(port, port->xdomain);
2688                 }
2689         }
2690 }
2691
2692 static int tb_resume_noirq(struct tb *tb)
2693 {
2694         struct tb_cm *tcm = tb_priv(tb);
2695         struct tb_tunnel *tunnel, *n;
2696         unsigned int usb3_delay = 0;
2697         LIST_HEAD(tunnels);
2698
2699         tb_dbg(tb, "resuming...\n");
2700
2701         /* Remove any PCI devices the firmware might have set up */
2702         tb_switch_reset(tb->root_switch);
2703
2704         tb_switch_resume(tb->root_switch);
2705         tb_free_invalid_tunnels(tb);
2706         tb_free_unplugged_children(tb->root_switch);
2707         tb_restore_children(tb->root_switch);
2708
2709         /*
2710          * If we get here from suspend to disk, the boot firmware or the
2711          * restore kernel might have created tunnels of their own. Since
2712          * we cannot be sure they are usable for us, we find and tear
2713          * them down.
2714          */
2715         tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2716         list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
2717                 if (tb_tunnel_is_usb3(tunnel))
2718                         usb3_delay = 500;
2719                 tb_tunnel_deactivate(tunnel);
2720                 tb_tunnel_free(tunnel);
2721         }
2722
2723         /* Re-create our tunnels now */
2724         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2725                 /* USB3 requires delay before it can be re-activated */
2726                 if (tb_tunnel_is_usb3(tunnel)) {
2727                         msleep(usb3_delay);
2728                         /* Only need to do it once */
2729                         usb3_delay = 0;
2730                 }
2731                 tb_tunnel_restart(tunnel);
2732         }
2733         if (!list_empty(&tcm->tunnel_list)) {
2734                 /*
2735                  * The PCIe links need some time to get going.
2736                  * 100ms works for me...
2737                  */
2738                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
2739                 msleep(100);
2740         }
2741         /* Allow tb_handle_hotplug to progress events */
2742         tcm->hotplug_active = true;
2743         tb_dbg(tb, "resume finished\n");
2744
2745         return 0;
2746 }
2747
2748 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
2749 {
2750         struct tb_port *port;
2751         int ret = 0;
2752
2753         tb_switch_for_each_port(sw, port) {
2754                 if (tb_is_upstream_port(port))
2755                         continue;
2756                 if (port->xdomain && port->xdomain->is_unplugged) {
2757                         tb_retimer_remove_all(port);
2758                         tb_xdomain_remove(port->xdomain);
2759                         tb_port_unconfigure_xdomain(port);
2760                         port->xdomain = NULL;
2761                         ret++;
2762                 } else if (port->remote) {
2763                         ret += tb_free_unplugged_xdomains(port->remote->sw);
2764                 }
2765         }
2766
2767         return ret;
2768 }
2769
2770 static int tb_freeze_noirq(struct tb *tb)
2771 {
2772         struct tb_cm *tcm = tb_priv(tb);
2773
2774         tcm->hotplug_active = false;
2775         return 0;
2776 }
2777
2778 static int tb_thaw_noirq(struct tb *tb)
2779 {
2780         struct tb_cm *tcm = tb_priv(tb);
2781
2782         tcm->hotplug_active = true;
2783         return 0;
2784 }
2785
2786 static void tb_complete(struct tb *tb)
2787 {
2788         /*
2789          * Release any unplugged XDomains and, in case another domain
2790          * was swapped in place of an unplugged XDomain, run another
2791          * rescan.
2792          */
2793         mutex_lock(&tb->lock);
2794         if (tb_free_unplugged_xdomains(tb->root_switch))
2795                 tb_scan_switch(tb->root_switch);
2796         mutex_unlock(&tb->lock);
2797 }
2798
2799 static int tb_runtime_suspend(struct tb *tb)
2800 {
2801         struct tb_cm *tcm = tb_priv(tb);
2802
2803         mutex_lock(&tb->lock);
2804         tb_switch_suspend(tb->root_switch, true);
2805         tcm->hotplug_active = false;
2806         mutex_unlock(&tb->lock);
2807
2808         return 0;
2809 }
2810
2811 static void tb_remove_work(struct work_struct *work)
2812 {
2813         struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
2814         struct tb *tb = tcm_to_tb(tcm);
2815
2816         mutex_lock(&tb->lock);
2817         if (tb->root_switch) {
2818                 tb_free_unplugged_children(tb->root_switch);
2819                 tb_free_unplugged_xdomains(tb->root_switch);
2820         }
2821         mutex_unlock(&tb->lock);
2822 }
2823
2824 static int tb_runtime_resume(struct tb *tb)
2825 {
2826         struct tb_cm *tcm = tb_priv(tb);
2827         struct tb_tunnel *tunnel, *n;
2828
2829         mutex_lock(&tb->lock);
2830         tb_switch_resume(tb->root_switch);
2831         tb_free_invalid_tunnels(tb);
2832         tb_restore_children(tb->root_switch);
2833         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
2834                 tb_tunnel_restart(tunnel);
2835         tcm->hotplug_active = true;
2836         mutex_unlock(&tb->lock);
2837
2838         /*
2839          * Schedule cleanup of any unplugged devices. Run this in a
2840          * separate thread to avoid possible deadlock if the device
2841          * removal runtime resumes the unplugged device.
2842          */
2843         queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
2844         return 0;
2845 }
2846
2847 static const struct tb_cm_ops tb_cm_ops = {
2848         .start = tb_start,
2849         .stop = tb_stop,
2850         .suspend_noirq = tb_suspend_noirq,
2851         .resume_noirq = tb_resume_noirq,
2852         .freeze_noirq = tb_freeze_noirq,
2853         .thaw_noirq = tb_thaw_noirq,
2854         .complete = tb_complete,
2855         .runtime_suspend = tb_runtime_suspend,
2856         .runtime_resume = tb_runtime_resume,
2857         .handle_event = tb_handle_event,
2858         .disapprove_switch = tb_disconnect_pci,
2859         .approve_switch = tb_tunnel_pci,
2860         .approve_xdomain_paths = tb_approve_xdomain_paths,
2861         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
2862 };
2863
2864 /*
2865  * During suspend the Thunderbolt controller is reset and all PCIe
2866  * tunnels are lost. The NHI driver will try to reestablish all tunnels
2867  * during resume. This adds device links between the tunneled PCIe
2868  * downstream ports and the NHI so that the device core will make sure
2869  * the NHI is resumed before the rest.
2870  */
2871 static bool tb_apple_add_links(struct tb_nhi *nhi)
2872 {
2873         struct pci_dev *upstream, *pdev;
2874         bool ret;
2875
2876         if (!x86_apple_machine)
2877                 return false;
2878
2879         switch (nhi->pdev->device) {
2880         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2881         case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2882         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2883         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2884                 break;
2885         default:
2886                 return false;
2887         }
2888
2889         upstream = pci_upstream_bridge(nhi->pdev);
2890         while (upstream) {
2891                 if (!pci_is_pcie(upstream))
2892                         return false;
2893                 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2894                         break;
2895                 upstream = pci_upstream_bridge(upstream);
2896         }
2897
2898         if (!upstream)
2899                 return false;
2900
2901         /*
2902          * For each hotplug downstream port, add a device link back
2903          * to the NHI so that PCIe tunnels can be re-established after
2904          * sleep.
2905          */
2906         ret = false;
2907         for_each_pci_bridge(pdev, upstream->subordinate) {
2908                 const struct device_link *link;
2909
2910                 if (!pci_is_pcie(pdev))
2911                         continue;
2912                 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2913                     !pdev->is_hotplug_bridge)
2914                         continue;
2915
2916                 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2917                                        DL_FLAG_AUTOREMOVE_SUPPLIER |
2918                                        DL_FLAG_PM_RUNTIME);
2919                 if (link) {
2920                         dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2921                                 dev_name(&pdev->dev));
2922                         ret = true;
2923                 } else {
2924                         dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2925                                  dev_name(&pdev->dev));
2926                 }
2927         }
2928
2929         return ret;
2930 }
2931
2932 struct tb *tb_probe(struct tb_nhi *nhi)
2933 {
2934         struct tb_cm *tcm;
2935         struct tb *tb;
2936
2937         tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
2938         if (!tb)
2939                 return NULL;
2940
2941         if (tb_acpi_may_tunnel_pcie())
2942                 tb->security_level = TB_SECURITY_USER;
2943         else
2944                 tb->security_level = TB_SECURITY_NOPCIE;
2945
2946         tb->cm_ops = &tb_cm_ops;
2947
2948         tcm = tb_priv(tb);
2949         INIT_LIST_HEAD(&tcm->tunnel_list);
2950         INIT_LIST_HEAD(&tcm->dp_resources);
2951         INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
2952         tb_init_bandwidth_groups(tcm);
2953
2954         tb_dbg(tb, "using software connection manager\n");
2955
2956         /*
2957          * Device links are needed to make sure we establish tunnels
2958          * before the PCIe/USB stack is resumed, so complain here if
2959          * they are missing.
2960          */
2961         if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
2962                 tb_warn(tb, "device links to tunneled native ports are missing!\n");
2963
2964         return tb;
2965 }