drivers/thunderbolt/tb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT              100     /* ms */
20 #define TB_RELEASE_BW_TIMEOUT   10000   /* ms */
21
22 /*
23  * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
24  * direction. This is 40G - 10% guard band bandwidth.
25  */
26 #define TB_ASYM_MIN             (40000 * 90 / 100)
27
28 /*
29  * Threshold bandwidth (in Mb/s) that is used to switch the links to
30  * asymmetric and back. This is selected as 45G which means when the
31  * request is higher than this, we switch the link to asymmetric, and
32  * when it is less than this we switch it back. The 45G is selected so
33  * that we still have 27G (of the total 72G) for bulk PCIe traffic when
34  * switching back to symmetric.
35  */
36 #define TB_ASYM_THRESHOLD       45000
37
38 #define MAX_GROUPS              7       /* max Group_ID is 7 */
39
40 static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
41 module_param_named(asym_threshold, asym_threshold, uint, 0444);
42 MODULE_PARM_DESC(asym_threshold,
43                 "threshold (Mb/s) for switching a Gen 4 link between symmetric and asymmetric. 0 disables. (default: "
44                 __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
45
46 /**
47  * struct tb_cm - Simple Thunderbolt connection manager
48  * @tunnel_list: List of active tunnels
49  * @dp_resources: List of available DP resources for DP tunneling
50  * @hotplug_active: tb_handle_hotplug will stop progressing plug
51  *                  events and exit if this is not set (it needs to
52  *                  acquire the lock one more time). Used to drain wq
53  *                  after cfg has been paused.
54  * @remove_work: Work used to remove any unplugged routers after
55  *               runtime resume
56  * @groups: Bandwidth groups used in this domain.
57  */
58 struct tb_cm {
59         struct list_head tunnel_list;
60         struct list_head dp_resources;
61         bool hotplug_active;
62         struct delayed_work remove_work;
63         struct tb_bandwidth_group groups[MAX_GROUPS];
64 };
65
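/*
 * The connection manager private data is allocated right after struct
 * tb in the same memory block (see tb_priv()), so we can get back to
 * the domain by stepping over the struct tb that precedes it.
 */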
66 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
67 {
68         return ((void *)tcm - sizeof(struct tb));
69 }
70
71 struct tb_hotplug_event {
72         struct work_struct work;
73         struct tb *tb;
74         u64 route;
75         u8 port;
76         bool unplug;
77 };
78
79 static void tb_handle_hotplug(struct work_struct *work);
80
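/*
 * Queue a hotplug event for processing in the domain workqueue. If the
 * allocation fails the event is silently dropped.
 */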
81 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
82 {
83         struct tb_hotplug_event *ev;
84
85         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
86         if (!ev)
87                 return;
88
89         ev->tb = tb;
90         ev->route = route;
91         ev->port = port;
92         ev->unplug = unplug;
93         INIT_WORK(&ev->work, tb_handle_hotplug);
94         queue_work(tb->wq, &ev->work);
95 }
96
97 /* enumeration & hot plug handling */
98
99 static void tb_add_dp_resources(struct tb_switch *sw)
100 {
101         struct tb_cm *tcm = tb_priv(sw->tb);
102         struct tb_port *port;
103
104         tb_switch_for_each_port(sw, port) {
105                 if (!tb_port_is_dpin(port))
106                         continue;
107
108                 if (!tb_switch_query_dp_resource(sw, port))
109                         continue;
110
111                 /*
112                  * If a DP IN on a device router exists, position it at the
113                  * beginning of the DP resources list, so that it is used
114                  * before DP IN of the host router. This way external GPU(s)
115                  * will be prioritized when pairing DP IN to a DP OUT.
116                  */
117                 if (tb_route(sw))
118                         list_add(&port->list, &tcm->dp_resources);
119                 else
120                         list_add_tail(&port->list, &tcm->dp_resources);
121
122                 tb_port_dbg(port, "DP IN resource available\n");
123         }
124 }
125
126 static void tb_remove_dp_resources(struct tb_switch *sw)
127 {
128         struct tb_cm *tcm = tb_priv(sw->tb);
129         struct tb_port *port, *tmp;
130
131         /* Clear children resources first */
132         tb_switch_for_each_port(sw, port) {
133                 if (tb_port_has_remote(port))
134                         tb_remove_dp_resources(port->remote->sw);
135         }
136
137         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
138                 if (port->sw == sw) {
139                         tb_port_dbg(port, "DP %s resource unavailable\n", tb_port_is_dpin(port) ? "IN" : "OUT");
140                         list_del_init(&port->list);
141                 }
142         }
143 }
144
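/* Add the DP adapter to the DP resource list unless it is already there */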
145 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
146 {
147         struct tb_cm *tcm = tb_priv(tb);
148         struct tb_port *p;
149
150         list_for_each_entry(p, &tcm->dp_resources, list) {
151                 if (p == port)
152                         return;
153         }
154
155         tb_port_dbg(port, "DP %s resource available discovered\n",
156                     tb_port_is_dpin(port) ? "IN" : "OUT");
157         list_add_tail(&port->list, &tcm->dp_resources);
158 }
159
160 static void tb_discover_dp_resources(struct tb *tb)
161 {
162         struct tb_cm *tcm = tb_priv(tb);
163         struct tb_tunnel *tunnel;
164
165         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
166                 if (tb_tunnel_is_dp(tunnel))
167                         tb_discover_dp_resource(tb, tunnel->dst_port);
168         }
169 }
170
171 /* Enables CL states up to host router */
172 static int tb_enable_clx(struct tb_switch *sw)
173 {
174         struct tb_cm *tcm = tb_priv(sw->tb);
175         unsigned int clx = TB_CL0S | TB_CL1;
176         const struct tb_tunnel *tunnel;
177         int ret;
178
179         /*
180          * Currently only enable CLx for the first link. This is enough
181          * to allow the CPU to save energy at least on Intel hardware
182          * and makes it slightly simpler to implement. We may change
183          * this in the future to cover the whole topology if it turns
184          * out to be beneficial.
185          */
186         while (sw && tb_switch_depth(sw) > 1)
187                 sw = tb_switch_parent(sw);
188
189         if (!sw)
190                 return 0;
191
192         if (tb_switch_depth(sw) != 1)
193                 return 0;
194
195         /*
196          * If we are re-enabling then check if there is an active DMA
197          * tunnel and in that case bail out.
198          */
199         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
200                 if (tb_tunnel_is_dma(tunnel)) {
201                         if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
202                                 return 0;
203                 }
204         }
205
206         /*
207          * Initially try with CL2. If that's not supported by the
208          * topology try with CL0s and CL1 and then give up.
209          */
210         ret = tb_switch_clx_enable(sw, clx | TB_CL2);
211         if (ret == -EOPNOTSUPP)
212                 ret = tb_switch_clx_enable(sw, clx);
213         return ret == -EOPNOTSUPP ? 0 : ret;
214 }
215
216 /**
217  * tb_disable_clx() - Disable CL states up to host router
218  * @sw: Router to start
219  *
220  * Disables CL states from @sw up to the host router. Returns true if
221  * any CL states were disabled. This can be used to figure out whether
222  * the link was set up by us or the boot firmware so we don't
223  * accidentally enable them if they were not enabled during discovery.
224  */
225 static bool tb_disable_clx(struct tb_switch *sw)
226 {
227         bool disabled = false;
228
229         do {
230                 int ret;
231
232                 ret = tb_switch_clx_disable(sw);
233                 if (ret > 0)
234                         disabled = true;
235                 else if (ret < 0)
236                         tb_sw_warn(sw, "failed to disable CL states\n");
237
238                 sw = tb_switch_parent(sw);
239         } while (sw);
240
241         return disabled;
242 }
243
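/*
 * device_for_each_child() callback: skips non-router children and, for
 * routers whose TMU is configured in the low-resolution mode, switches
 * them to HiFi (uni-directional if CL1 is enabled) and enables the TMU.
 */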
244 static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
245 {
246         struct tb_switch *sw;
247
248         sw = tb_to_switch(dev);
249         if (!sw)
250                 return 0;
251
252         if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
253                 enum tb_switch_tmu_mode mode;
254                 int ret;
255
256                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
257                         mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
258                 else
259                         mode = TB_SWITCH_TMU_MODE_HIFI_BI;
260
261                 ret = tb_switch_tmu_configure(sw, mode);
262                 if (ret)
263                         return ret;
264
265                 return tb_switch_tmu_enable(sw);
266         }
267
268         return 0;
269 }
270
271 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
272 {
273         struct tb_switch *sw;
274
275         if (!tunnel)
276                 return;
277
278         /*
279          * Once first DP tunnel is established we change the TMU
280          * accuracy of first depth child routers (and the host router)
281          * to the highest. This is needed for the DP tunneling to work
282          * but also allows CL0s.
283          *
284          * If both routers are v2 then we don't need to do anything as
285          * they are using enhanced TMU mode that allows all CLx.
286          */
287         sw = tunnel->tb->root_switch;
288         device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
289 }
290
291 static int tb_enable_tmu(struct tb_switch *sw)
292 {
293         int ret;
294
295         /*
296          * If both routers at the end of the link are v2 we simply
297  * enable the enhanced uni-directional mode. That covers all
298          * the CL states. For v1 and before we need to use the normal
299          * rate to allow CL1 (when supported). Otherwise we keep the TMU
300          * running at the highest accuracy.
301          */
302         ret = tb_switch_tmu_configure(sw,
303                         TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
304         if (ret == -EOPNOTSUPP) {
305                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
306                         ret = tb_switch_tmu_configure(sw,
307                                         TB_SWITCH_TMU_MODE_LOWRES);
308                 else
309                         ret = tb_switch_tmu_configure(sw,
310                                         TB_SWITCH_TMU_MODE_HIFI_BI);
311         }
312         if (ret)
313                 return ret;
314
315         /* If it is already enabled in correct mode, don't touch it */
316         if (tb_switch_tmu_is_enabled(sw))
317                 return 0;
318
319         ret = tb_switch_tmu_disable(sw);
320         if (ret)
321                 return ret;
322
323         ret = tb_switch_tmu_post_time(sw);
324         if (ret)
325                 return ret;
326
327         return tb_switch_tmu_enable(sw);
328 }
329
330 static void tb_switch_discover_tunnels(struct tb_switch *sw,
331                                        struct list_head *list,
332                                        bool alloc_hopids)
333 {
334         struct tb *tb = sw->tb;
335         struct tb_port *port;
336
337         tb_switch_for_each_port(sw, port) {
338                 struct tb_tunnel *tunnel = NULL;
339
340                 switch (port->config.type) {
341                 case TB_TYPE_DP_HDMI_IN:
342                         tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
343                         tb_increase_tmu_accuracy(tunnel);
344                         break;
345
346                 case TB_TYPE_PCIE_DOWN:
347                         tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
348                         break;
349
350                 case TB_TYPE_USB3_DOWN:
351                         tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
352                         break;
353
354                 default:
355                         break;
356                 }
357
358                 if (tunnel)
359                         list_add_tail(&tunnel->list, list);
360         }
361
362         tb_switch_for_each_port(sw, port) {
363                 if (tb_port_has_remote(port)) {
364                         tb_switch_discover_tunnels(port->remote->sw, list,
365                                                    alloc_hopids);
366                 }
367         }
368 }
369
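/*
 * USB4 routers configure XDomain connections through the USB4 port
 * operations whereas legacy Thunderbolt routers use the link controller
 * (LC) registers.
 */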
370 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
371 {
372         if (tb_switch_is_usb4(port->sw))
373                 return usb4_port_configure_xdomain(port, xd);
374         return tb_lc_configure_xdomain(port);
375 }
376
377 static void tb_port_unconfigure_xdomain(struct tb_port *port)
378 {
379         if (tb_switch_is_usb4(port->sw))
380                 usb4_port_unconfigure_xdomain(port);
381         else
382                 tb_lc_unconfigure_xdomain(port);
383 }
384
385 static void tb_scan_xdomain(struct tb_port *port)
386 {
387         struct tb_switch *sw = port->sw;
388         struct tb *tb = sw->tb;
389         struct tb_xdomain *xd;
390         u64 route;
391
392         if (!tb_is_xdomain_enabled())
393                 return;
394
395         route = tb_downstream_route(port);
396         xd = tb_xdomain_find_by_route(tb, route);
397         if (xd) {
398                 tb_xdomain_put(xd);
399                 return;
400         }
401
402         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
403                               NULL);
404         if (xd) {
405                 tb_port_at(route, sw)->xdomain = xd;
406                 tb_port_configure_xdomain(port, xd);
407                 tb_xdomain_add(xd);
408         }
409 }
410
411 /**
412  * tb_find_unused_port() - return the first inactive port on @sw
413  * @sw: Switch to find the port on
414  * @type: Port type to look for
415  */
416 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
417                                            enum tb_port_type type)
418 {
419         struct tb_port *port;
420
421         tb_switch_for_each_port(sw, port) {
422                 if (tb_is_upstream_port(port))
423                         continue;
424                 if (port->config.type != type)
425                         continue;
426                 if (!port->cap_adap)
427                         continue;
428                 if (tb_port_is_enabled(port))
429                         continue;
430                 return port;
431         }
432         return NULL;
433 }
434
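/*
 * Returns the USB3 downstream adapter of @sw mapped to @port, or %NULL
 * if there is none or it is already enabled (tunneled).
 */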
435 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
436                                          const struct tb_port *port)
437 {
438         struct tb_port *down;
439
440         down = usb4_switch_map_usb3_down(sw, port);
441         if (down && !tb_usb3_port_is_enabled(down))
442                 return down;
443         return NULL;
444 }
445
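/*
 * Returns the first active tunnel of @type whose source or destination
 * adapter matches the given (non-%NULL) @src_port or @dst_port.
 */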
446 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
447                                         struct tb_port *src_port,
448                                         struct tb_port *dst_port)
449 {
450         struct tb_cm *tcm = tb_priv(tb);
451         struct tb_tunnel *tunnel;
452
453         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
454                 if (tunnel->type == type &&
455                     ((src_port && src_port == tunnel->src_port) ||
456                      (dst_port && dst_port == tunnel->dst_port))) {
457                         return tunnel;
458                 }
459         }
460
461         return NULL;
462 }
463
464 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
465                                                    struct tb_port *src_port,
466                                                    struct tb_port *dst_port)
467 {
468         struct tb_port *port, *usb3_down;
469         struct tb_switch *sw;
470
471         /* Pick the router that is deepest in the topology */
472         if (tb_port_path_direction_downstream(src_port, dst_port))
473                 sw = dst_port->sw;
474         else
475                 sw = src_port->sw;
476
477         /* Can't be the host router */
478         if (sw == tb->root_switch)
479                 return NULL;
480
481         /* Find the downstream USB4 port that leads to this router */
482         port = tb_port_at(tb_route(sw), tb->root_switch);
483         /* Find the corresponding host router USB3 downstream port */
484         usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
485         if (!usb3_down)
486                 return NULL;
487
488         return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
489 }
490
491 /**
492  * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
493  * @tb: Domain structure
494  * @src_port: Source protocol adapter
495  * @dst_port: Destination protocol adapter
496  * @port: USB4 port the consumed bandwidth is calculated for
497  * @consumed_up: Consumed upstream bandwidth (Mb/s)
498  * @consumed_down: Consumed downstream bandwidth (Mb/s)
499  *
500  * Calculates consumed USB3 and PCIe bandwidth at @port on the path
501  * from @src_port to @dst_port. Does not take the tunnel starting from
502  * @src_port and ending at @dst_port into account.
503  */
504 static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
505                                            struct tb_port *src_port,
506                                            struct tb_port *dst_port,
507                                            struct tb_port *port,
508                                            int *consumed_up,
509                                            int *consumed_down)
510 {
511         int pci_consumed_up, pci_consumed_down;
512         struct tb_tunnel *tunnel;
513
514         *consumed_up = *consumed_down = 0;
515
516         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
517         if (tunnel && tunnel->src_port != src_port &&
518             tunnel->dst_port != dst_port) {
519                 int ret;
520
521                 ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
522                                                    consumed_down);
523                 if (ret)
524                         return ret;
525         }
526
527         /*
528          * If there is anything reserved for PCIe bulk traffic take it
529          * into account here too.
530          */
531         if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
532                 *consumed_up += pci_consumed_up;
533                 *consumed_down += pci_consumed_down;
534         }
535
536         return 0;
537 }
538
539 /**
540  * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
541  * @tb: Domain structure
542  * @src_port: Source protocol adapter
543  * @dst_port: Destination protocol adapter
544  * @port: USB4 port the consumed bandwidth is calculated for
545  * @consumed_up: Consumed upstream bandwidth (Mb/s)
546  * @consumed_down: Consumed downstream bandwidth (Mb/s)
547  *
548  * Calculates consumed DP bandwidth at @port on the path from @src_port
549  * to @dst_port. Does not take the tunnel starting from @src_port and
550  * ending at @dst_port into account.
551  *
552  * If there is bandwidth reserved for any of the groups between
553  * @src_port and @dst_port (but not yet used) that is also taken into
554  * account in the returned consumed bandwidth.
555  */
556 static int tb_consumed_dp_bandwidth(struct tb *tb,
557                                     struct tb_port *src_port,
558                                     struct tb_port *dst_port,
559                                     struct tb_port *port,
560                                     int *consumed_up,
561                                     int *consumed_down)
562 {
563         int group_reserved[MAX_GROUPS] = {};
564         struct tb_cm *tcm = tb_priv(tb);
565         struct tb_tunnel *tunnel;
566         bool downstream;
567         int i, ret;
568
569         *consumed_up = *consumed_down = 0;
570
571         /*
572          * Find all DP tunnels that cross the port and reduce
573          * their consumed bandwidth from the available.
574          */
575         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
576                 const struct tb_bandwidth_group *group;
577                 int dp_consumed_up, dp_consumed_down;
578
579                 if (tb_tunnel_is_invalid(tunnel))
580                         continue;
581
582                 if (!tb_tunnel_is_dp(tunnel))
583                         continue;
584
585                 if (!tb_tunnel_port_on_path(tunnel, port))
586                         continue;
587
588                 /*
589                  * Calculate what is reserved for groups crossing the
590                  * same ports only once (as that is reserved for all the
591                  * tunnels in the group).
592                  */
593                 group = tunnel->src_port->group;
594                 if (group && group->reserved && !group_reserved[group->index])
595                         group_reserved[group->index] = group->reserved;
596
597                 /*
598                  * Ignore the DP tunnel between src_port and dst_port
599                  * because it is the same tunnel and we may be
600                  * re-calculating estimated bandwidth.
601                  */
602                 if (tunnel->src_port == src_port &&
603                     tunnel->dst_port == dst_port)
604                         continue;
605
606                 ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
607                                                    &dp_consumed_down);
608                 if (ret)
609                         return ret;
610
611                 *consumed_up += dp_consumed_up;
612                 *consumed_down += dp_consumed_down;
613         }
614
615         downstream = tb_port_path_direction_downstream(src_port, dst_port);
616         for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
617                 if (downstream)
618                         *consumed_down += group_reserved[i];
619                 else
620                         *consumed_up += group_reserved[i];
621         }
622
623         return 0;
624 }
625
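/*
 * Returns true if the lane adapter @port supports the asymmetric link
 * width that favors the direction from @src_port towards @dst_port.
 */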
626 static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
627                               struct tb_port *port)
628 {
629         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
630         enum tb_link_width width;
631
632         if (tb_is_upstream_port(port))
633                 width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
634         else
635                 width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
636
637         return tb_port_width_supported(port, width);
638 }
639
640 /**
641  * tb_maximum_bandwidth() - Maximum bandwidth over a single link
642  * @tb: Domain structure
643  * @src_port: Source protocol adapter
644  * @dst_port: Destination protocol adapter
645  * @port: USB4 port the total bandwidth is calculated for
646  * @max_up: Maximum upstream bandwidth (Mb/s)
647  * @max_down: Maximum downstream bandwidth (Mb/s)
648  * @include_asym: Include bandwidth if the link is switched from
649  *                symmetric to asymmetric
650  *
651  * Returns maximum possible bandwidth in @max_up and @max_down over a
652  * single link at @port. If @include_asym is set then includes the
653  * additional bandwidth if the links are transitioned into asymmetric in
654  * the direction from @src_port to @dst_port.
655  */
656 static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
657                                 struct tb_port *dst_port, struct tb_port *port,
658                                 int *max_up, int *max_down, bool include_asym)
659 {
660         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
661         int link_speed, link_width, up_bw, down_bw;
662
663         /*
664          * Can include asymmetric, only if it is actually supported by
665          * the lane adapter.
666          */
667         if (!tb_asym_supported(src_port, dst_port, port))
668                 include_asym = false;
669
670         if (tb_is_upstream_port(port)) {
671                 link_speed = port->sw->link_speed;
672                 /*
673                  * sw->link_width is from upstream perspective so we use
674                  * the opposite for downstream of the host router.
675                  */
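                /*
                 * An asymmetric Gen 4 link runs three lanes in one
                 * direction and one in the other; link_speed is per
                 * lane in Gb/s, hence the multiplication by 1000 to
                 * get Mb/s.
                 */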
676                 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
677                         up_bw = link_speed * 3 * 1000;
678                         down_bw = link_speed * 1 * 1000;
679                 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
680                         up_bw = link_speed * 1 * 1000;
681                         down_bw = link_speed * 3 * 1000;
682                 } else if (include_asym) {
683                         /*
684                          * The link is symmetric at the moment but we
685                          * can switch it to asymmetric as needed. Report
686                          * this bandwidth as available (even though it
687                          * is not yet enabled).
688                          */
689                         if (downstream) {
690                                 up_bw = link_speed * 1 * 1000;
691                                 down_bw = link_speed * 3 * 1000;
692                         } else {
693                                 up_bw = link_speed * 3 * 1000;
694                                 down_bw = link_speed * 1 * 1000;
695                         }
696                 } else {
697                         up_bw = link_speed * port->sw->link_width * 1000;
698                         down_bw = up_bw;
699                 }
700         } else {
701                 link_speed = tb_port_get_link_speed(port);
702                 if (link_speed < 0)
703                         return link_speed;
704
705                 link_width = tb_port_get_link_width(port);
706                 if (link_width < 0)
707                         return link_width;
708
709                 if (link_width == TB_LINK_WIDTH_ASYM_TX) {
710                         up_bw = link_speed * 1 * 1000;
711                         down_bw = link_speed * 3 * 1000;
712                 } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
713                         up_bw = link_speed * 3 * 1000;
714                         down_bw = link_speed * 1 * 1000;
715                 } else if (include_asym) {
716                         /*
717                          * The link is symmetric at the moment but we
718                          * can switch it to asymmetric as needed. Report
719                          * this bandwidth as available (even though it
720                          * is not yet enabled).
721                          */
722                         if (downstream) {
723                                 up_bw = link_speed * 1 * 1000;
724                                 down_bw = link_speed * 3 * 1000;
725                         } else {
726                                 up_bw = link_speed * 3 * 1000;
727                                 down_bw = link_speed * 1 * 1000;
728                         }
729                 } else {
730                         up_bw = link_speed * link_width * 1000;
731                         down_bw = up_bw;
732                 }
733         }
734
735         /* Leave 10% guard band */
736         *max_up = up_bw - up_bw / 10;
737         *max_down = down_bw - down_bw / 10;
738
739         tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
740         return 0;
741 }
742
743 /**
744  * tb_available_bandwidth() - Available bandwidth for tunneling
745  * @tb: Domain structure
746  * @src_port: Source protocol adapter
747  * @dst_port: Destination protocol adapter
748  * @available_up: Available bandwidth upstream (Mb/s)
749  * @available_down: Available bandwidth downstream (Mb/s)
750  * @include_asym: Include bandwidth if the link is switched from
751  *                symmetric to asymmetric
752  *
753  * Calculates maximum available bandwidth for protocol tunneling between
754  * @src_port and @dst_port at the moment. This is the minimum of the
755  * maximum link bandwidth across all links, reduced by the bandwidth
756  * currently consumed on each link.
757  *
758  * If @include_asym is true then the bandwidth that could be added by
759  * transitioning the links into asymmetric is also included (the links
760  * are not actually transitioned).
761  */
762 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
763                                  struct tb_port *dst_port, int *available_up,
764                                  int *available_down, bool include_asym)
765 {
766         struct tb_port *port;
767         int ret;
768
769         /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
770         *available_up = *available_down = 120000;
771
772         /* Find the minimum available bandwidth over all links */
773         tb_for_each_port_on_path(src_port, dst_port, port) {
774                 int max_up, max_down, consumed_up, consumed_down;
775
776                 if (!tb_port_is_null(port))
777                         continue;
778
779                 ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
780                                            &max_up, &max_down, include_asym);
781                 if (ret)
782                         return ret;
783
784                 ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
785                                                       port, &consumed_up,
786                                                       &consumed_down);
787                 if (ret)
788                         return ret;
789                 max_up -= consumed_up;
790                 max_down -= consumed_down;
791
792                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
793                                                &consumed_up, &consumed_down);
794                 if (ret)
795                         return ret;
796                 max_up -= consumed_up;
797                 max_down -= consumed_down;
798
799                 if (max_up < *available_up)
800                         *available_up = max_up;
801                 if (max_down < *available_down)
802                         *available_down = max_down;
803         }
804
805         if (*available_up < 0)
806                 *available_up = 0;
807         if (*available_down < 0)
808                 *available_down = 0;
809
810         return 0;
811 }
812
813 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
814                                             struct tb_port *src_port,
815                                             struct tb_port *dst_port)
816 {
817         struct tb_tunnel *tunnel;
818
819         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
820         return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
821 }
822
823 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
824                                       struct tb_port *dst_port)
825 {
826         int ret, available_up, available_down;
827         struct tb_tunnel *tunnel;
828
829         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
830         if (!tunnel)
831                 return;
832
833         tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");
834
835         /*
836          * Calculate available bandwidth for the first hop USB3 tunnel.
837          * That determines the whole USB3 bandwidth for this branch.
838          */
839         ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
840                                      &available_up, &available_down, false);
841         if (ret) {
842                 tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
843                 return;
844         }
845
846         tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
847                       available_down);
848
849         tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
850 }
851
852 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
853 {
854         struct tb_switch *parent = tb_switch_parent(sw);
855         int ret, available_up, available_down;
856         struct tb_port *up, *down, *port;
857         struct tb_cm *tcm = tb_priv(tb);
858         struct tb_tunnel *tunnel;
859
860         if (!tb_acpi_may_tunnel_usb3()) {
861                 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
862                 return 0;
863         }
864
865         up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
866         if (!up)
867                 return 0;
868
869         if (!sw->link_usb4)
870                 return 0;
871
872         /*
873          * Look up available down port. Since we are chaining it should
874          * be found right above this switch.
875          */
876         port = tb_switch_downstream_port(sw);
877         down = tb_find_usb3_down(parent, port);
878         if (!down)
879                 return 0;
880
881         if (tb_route(parent)) {
882                 struct tb_port *parent_up;
883                 /*
884                  * Check first that the parent switch has its upstream USB3
885                  * port enabled. Otherwise the chain is not complete and
886                  * there is no point setting up a new tunnel.
887                  */
888                 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
889                 if (!parent_up || !tb_port_is_enabled(parent_up))
890                         return 0;
891
892                 /* Make all unused bandwidth available for the new tunnel */
893                 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
894                 if (ret)
895                         return ret;
896         }
897
898         ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
899                                      false);
900         if (ret)
901                 goto err_reclaim;
902
903         tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
904                     available_up, available_down);
905
906         tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
907                                       available_down);
908         if (!tunnel) {
909                 ret = -ENOMEM;
910                 goto err_reclaim;
911         }
912
913         if (tb_tunnel_activate(tunnel)) {
914                 tb_port_info(up,
915                              "USB3 tunnel activation failed, aborting\n");
916                 ret = -EIO;
917                 goto err_free;
918         }
919
920         list_add_tail(&tunnel->list, &tcm->tunnel_list);
921         if (tb_route(parent))
922                 tb_reclaim_usb3_bandwidth(tb, down, up);
923
924         return 0;
925
926 err_free:
927         tb_tunnel_free(tunnel);
928 err_reclaim:
929         if (tb_route(parent))
930                 tb_reclaim_usb3_bandwidth(tb, down, up);
931
932         return ret;
933 }
934
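/*
 * Creates USB3 tunnels for @sw (unless it is the host router) and then
 * recursively for all routers connected below it.
 */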
935 static int tb_create_usb3_tunnels(struct tb_switch *sw)
936 {
937         struct tb_port *port;
938         int ret;
939
940         if (!tb_acpi_may_tunnel_usb3())
941                 return 0;
942
943         if (tb_route(sw)) {
944                 ret = tb_tunnel_usb3(sw->tb, sw);
945                 if (ret)
946                         return ret;
947         }
948
949         tb_switch_for_each_port(sw, port) {
950                 if (!tb_port_has_remote(port))
951                         continue;
952                 ret = tb_create_usb3_tunnels(port->remote->sw);
953                 if (ret)
954                         return ret;
955         }
956
957         return 0;
958 }
959
960 /**
961  * tb_configure_asym() - Transition links to asymmetric if needed
962  * @tb: Domain structure
963  * @src_port: Source adapter to start the transition
964  * @dst_port: Destination adapter
965  * @requested_up: Additional bandwidth (Mb/s) required upstream
966  * @requested_down: Additional bandwidth (Mb/s) required downstream
967  *
968  * Transition links between @src_port and @dst_port into asymmetric, with
969  * three lanes in the direction from @src_port towards @dst_port and one lane
970  * in the opposite direction, if the bandwidth requirements
971  * (requested + currently consumed) on that link exceed @asym_threshold.
972  *
973  * Must be called with available >= requested over all links.
974  */
975 static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
976                              struct tb_port *dst_port, int requested_up,
977                              int requested_down)
978 {
979         bool clx = false, clx_disabled = false, downstream;
980         struct tb_switch *sw;
981         struct tb_port *up;
982         int ret = 0;
983
984         if (!asym_threshold)
985                 return 0;
986
987         downstream = tb_port_path_direction_downstream(src_port, dst_port);
988         /* Pick up router deepest in the hierarchy */
989         if (downstream)
990                 sw = dst_port->sw;
991         else
992                 sw = src_port->sw;
993
994         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
995                 struct tb_port *down = tb_switch_downstream_port(up->sw);
996                 enum tb_link_width width_up, width_down;
997                 int consumed_up, consumed_down;
998
999                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1000                                                &consumed_up, &consumed_down);
1001                 if (ret)
1002                         break;
1003
1004                 if (downstream) {
1005                         /*
1006                          * Downstream, so make sure the upstream direction stays
1007                          * within 36G (40G minus 10% guard band) and the consumed +
1008                          * requested downstream bandwidth exceeds the threshold.
1009                          */
1010                         if (consumed_up + requested_up >= TB_ASYM_MIN) {
1011                                 ret = -ENOBUFS;
1012                                 break;
1013                         }
1014                         /* Does consumed + requested exceed the threshold */
1015                         if (consumed_down + requested_down < asym_threshold)
1016                                 continue;
1017
1018                         width_up = TB_LINK_WIDTH_ASYM_RX;
1019                         width_down = TB_LINK_WIDTH_ASYM_TX;
1020                 } else {
1021                         /* Upstream, the opposite of above */
1022                         if (consumed_down + requested_down >= TB_ASYM_MIN) {
1023                                 ret = -ENOBUFS;
1024                                 break;
1025                         }
1026                         if (consumed_up + requested_up < asym_threshold)
1027                                 continue;
1028
1029                         width_up = TB_LINK_WIDTH_ASYM_TX;
1030                         width_down = TB_LINK_WIDTH_ASYM_RX;
1031                 }
1032
1033                 if (up->sw->link_width == width_up)
1034                         continue;
1035
1036                 if (!tb_port_width_supported(up, width_up) ||
1037                     !tb_port_width_supported(down, width_down))
1038                         continue;
1039
1040                 /*
1041                  * Disable CL states before doing any transitions. We
1042                  * delayed it until now that we know there is a real
1043                  * transition taking place.
1044                  */
1045                 if (!clx_disabled) {
1046                         clx = tb_disable_clx(sw);
1047                         clx_disabled = true;
1048                 }
1049
1050                 tb_sw_dbg(up->sw, "configuring asymmetric link\n");
1051
1052                 /*
1053                  * Here requested + consumed > threshold so we need to
1054                  * transtion the link into asymmetric now.
1055                  */
1056                 ret = tb_switch_set_link_width(up->sw, width_up);
1057                 if (ret) {
1058                         tb_sw_warn(up->sw, "failed to set link width\n");
1059                         break;
1060                 }
1061         }
1062
1063         /* Re-enable CL states if they were previously enabled */
1064         if (clx)
1065                 tb_enable_clx(sw);
1066
1067         return ret;
1068 }
1069
1070 /**
1071  * tb_configure_sym() - Transition links to symmetric if possible
1072  * @tb: Domain structure
1073  * @src_port: Source adapter to start the transition
1074  * @dst_port: Destination adapter
1075  * @keep_asym: Keep asymmetric link if preferred
1076  *
1077  * Goes over each link from @src_port to @dst_port and tries to transition
1078  * the link to symmetric if the currently consumed bandwidth allows. If
1079  * @keep_asym is %true, a router's preference for an asymmetric link is honored.
1080  */
1081 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
1082                             struct tb_port *dst_port, bool keep_asym)
1083 {
1084         bool clx = false, clx_disabled = false, downstream;
1085         struct tb_switch *sw;
1086         struct tb_port *up;
1087         int ret = 0;
1088
1089         if (!asym_threshold)
1090                 return 0;
1091
1092         downstream = tb_port_path_direction_downstream(src_port, dst_port);
1093         /* Pick up router deepest in the hierarchy */
1094         if (downstream)
1095                 sw = dst_port->sw;
1096         else
1097                 sw = src_port->sw;
1098
1099         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1100                 int consumed_up, consumed_down;
1101
1102                 /* Already symmetric */
1103                 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
1104                         continue;
1105                 /* Unplugged, no need to switch */
1106                 if (up->sw->is_unplugged)
1107                         continue;
1108
1109                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1110                                                &consumed_up, &consumed_down);
1111                 if (ret)
1112                         break;
1113
1114                 if (downstream) {
1115                         /*
1116                          * Downstream so we want the consumed_down < threshold.
1117                          * Upstream traffic should be less than 36G (40G minus
1118                          * 10% guard band) as the link was configured asymmetric
1119                          * already.
1120                          */
1121                         if (consumed_down >= asym_threshold)
1122                                 continue;
1123                 } else {
1124                         if (consumed_up >= asym_threshold)
1125                                 continue;
1126                 }
1127
1128                 if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
1129                         continue;
1130
1131                 /*
1132                  * Here consumed < threshold so we can transition the
1133                  * link to symmetric.
1134                  *
1135                  * However, if the router prefers asymmetric link we
1136                  * honor that (unless @keep_asym is %false).
1137                  */
1138                 if (keep_asym &&
1139                     up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
1140                         tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
1141                         continue;
1142                 }
1143
1144                 /* Disable CL states before doing any transitions */
1145                 if (!clx_disabled) {
1146                         clx = tb_disable_clx(sw);
1147                         clx_disabled = true;
1148                 }
1149
1150                 tb_sw_dbg(up->sw, "configuring symmetric link\n");
1151
1152                 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
1153                 if (ret) {
1154                         tb_sw_warn(up->sw, "failed to set link width\n");
1155                         break;
1156                 }
1157         }
1158
1159         /* Re-enable CL states if they were previously enabled */
1160         if (clx)
1161                 tb_enable_clx(sw);
1162
1163         return ret;
1164 }
1165
1166 static void tb_configure_link(struct tb_port *down, struct tb_port *up,
1167                               struct tb_switch *sw)
1168 {
1169         struct tb *tb = sw->tb;
1170
1171         /* Link the routers using both links if available */
1172         down->remote = up;
1173         up->remote = down;
1174         if (down->dual_link_port && up->dual_link_port) {
1175                 down->dual_link_port->remote = up->dual_link_port;
1176                 up->dual_link_port->remote = down->dual_link_port;
1177         }
1178
1179         /*
1180          * Enable lane bonding if the link is currently two single lane
1181          * links.
1182          */
1183         if (sw->link_width < TB_LINK_WIDTH_DUAL)
1184                 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
1185
1186         /*
1187          * If a device router that comes up with a symmetric link is
1188          * connected deeper in the hierarchy, we transition the links
1189          * above it back to symmetric if bandwidth allows.
1190          */
1191         if (tb_switch_depth(sw) > 1 &&
1192             tb_port_get_link_generation(up) >= 4 &&
1193             up->sw->link_width == TB_LINK_WIDTH_DUAL) {
1194                 struct tb_port *host_port;
1195
1196                 host_port = tb_port_at(tb_route(sw), tb->root_switch);
1197                 tb_configure_sym(tb, host_port, up, false);
1198         }
1199
1200         /* Set the link configured */
1201         tb_switch_configure_link(sw);
1202 }
1203
1204 static void tb_scan_port(struct tb_port *port);
1205
1206 /*
1207  * tb_scan_switch() - scan for and initialize downstream switches
1208  */
1209 static void tb_scan_switch(struct tb_switch *sw)
1210 {
1211         struct tb_port *port;
1212
1213         pm_runtime_get_sync(&sw->dev);
1214
1215         tb_switch_for_each_port(sw, port)
1216                 tb_scan_port(port);
1217
1218         pm_runtime_mark_last_busy(&sw->dev);
1219         pm_runtime_put_autosuspend(&sw->dev);
1220 }
1221
1222 /*
1223  * tb_scan_port() - check for and initialize switches below port
1224  */
1225 static void tb_scan_port(struct tb_port *port)
1226 {
1227         struct tb_cm *tcm = tb_priv(port->sw->tb);
1228         struct tb_port *upstream_port;
1229         bool discovery = false;
1230         struct tb_switch *sw;
1231
1232         if (tb_is_upstream_port(port))
1233                 return;
1234
1235         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
1236             !tb_dp_port_is_enabled(port)) {
1237                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
1238                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
1239                                  false);
1240                 return;
1241         }
1242
1243         if (port->config.type != TB_TYPE_PORT)
1244                 return;
1245         if (port->dual_link_port && port->link_nr)
1246                 return; /*
1247                          * Downstream switch is reachable through two ports.
1248                          * Only scan on the primary port (link_nr == 0).
1249                          */
1250
1251         if (port->usb4)
1252                 pm_runtime_get_sync(&port->usb4->dev);
1253
1254         if (tb_wait_for_port(port, false) <= 0)
1255                 goto out_rpm_put;
1256         if (port->remote) {
1257                 tb_port_dbg(port, "port already has a remote\n");
1258                 goto out_rpm_put;
1259         }
1260
1261         tb_retimer_scan(port, true);
1262
1263         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
1264                              tb_downstream_route(port));
1265         if (IS_ERR(sw)) {
1266                 /*
1267                  * If there is an error accessing the connected switch
1268                  * it may be connected to another domain. Also we allow
1269                  * the other domain to be connected to a max depth switch.
1270                  */
1271                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
1272                         tb_scan_xdomain(port);
1273                 goto out_rpm_put;
1274         }
1275
1276         if (tb_switch_configure(sw)) {
1277                 tb_switch_put(sw);
1278                 goto out_rpm_put;
1279         }
1280
1281         /*
1282          * If there was previously another domain connected remove it
1283          * first.
1284          */
1285         if (port->xdomain) {
1286                 tb_xdomain_remove(port->xdomain);
1287                 tb_port_unconfigure_xdomain(port);
1288                 port->xdomain = NULL;
1289         }
1290
1291         /*
1292          * Do not send uevents until we have discovered all existing
1293          * tunnels and know which switches were authorized already by
1294          * the boot firmware.
1295          */
1296         if (!tcm->hotplug_active) {
1297                 dev_set_uevent_suppress(&sw->dev, true);
1298                 discovery = true;
1299         }
1300
1301         /*
1302          * At the moment we can support runtime PM only for Thunderbolt 2
1303          * and beyond (devices with LC).
1304          */
1305         sw->rpm = sw->generation > 1;
1306
1307         if (tb_switch_add(sw)) {
1308                 tb_switch_put(sw);
1309                 goto out_rpm_put;
1310         }
1311
1312         upstream_port = tb_upstream_port(sw);
1313         tb_configure_link(port, upstream_port, sw);
1314
1315         /*
1316          * CL0s and CL1 are enabled and supported together.
1317          * Silently ignore CLx enabling in case CLx is not supported.
1318          */
1319         if (discovery)
1320                 tb_sw_dbg(sw, "discovery, not touching CL states\n");
1321         else if (tb_enable_clx(sw))
1322                 tb_sw_warn(sw, "failed to enable CL states\n");
1323
1324         if (tb_enable_tmu(sw))
1325                 tb_sw_warn(sw, "failed to enable TMU\n");
1326
1327         /*
1328          * Configuration valid needs to be set after the TMU has been
1329          * enabled for the upstream port of the router so we do it here.
1330          */
1331         tb_switch_configuration_valid(sw);
1332
1333         /* Scan upstream retimers */
1334         tb_retimer_scan(upstream_port, true);
1335
1336         /*
1337          * Create USB 3.x tunnels only when the switch is plugged to the
1338          * domain. This is because we scan the domain also during discovery
1339          * and want to discover existing USB 3.x tunnels before we create
1340          * any new ones.
1341          */
1342         if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1343                 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
1344
1345         tb_add_dp_resources(sw);
1346         tb_scan_switch(sw);
1347
1348 out_rpm_put:
1349         if (port->usb4) {
1350                 pm_runtime_mark_last_busy(&port->usb4->dev);
1351                 pm_runtime_put_autosuspend(&port->usb4->dev);
1352         }
1353 }
1354
1355 static void
1356 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1357 {
1358         struct tb_tunnel *first_tunnel;
1359         struct tb *tb = group->tb;
1360         struct tb_port *in;
1361         int ret;
1362
1363         tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1364                group->index);
1365
1366         first_tunnel = NULL;
1367         list_for_each_entry(in, &group->ports, group_list) {
1368                 int estimated_bw, estimated_up, estimated_down;
1369                 struct tb_tunnel *tunnel;
1370                 struct tb_port *out;
1371
1372                 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1373                         continue;
1374
1375                 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1376                 if (WARN_ON(!tunnel))
1377                         break;
1378
1379                 if (!first_tunnel) {
1380                         /*
1381                          * Since USB3 bandwidth is shared by all DP
1382                          * tunnels under the host router USB4 port, even
1383                          * if they do not begin from the host router, we
1384                          * can release USB3 bandwidth just once and not
1385                          * for each tunnel separately.
1386                          */
1387                         first_tunnel = tunnel;
1388                         ret = tb_release_unused_usb3_bandwidth(tb,
1389                                 first_tunnel->src_port, first_tunnel->dst_port);
1390                         if (ret) {
1391                                 tb_tunnel_warn(tunnel,
1392                                         "failed to release unused bandwidth\n");
1393                                 break;
1394                         }
1395                 }
1396
1397                 out = tunnel->dst_port;
1398                 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1399                                              &estimated_down, true);
1400                 if (ret) {
1401                         tb_tunnel_warn(tunnel,
1402                                 "failed to re-calculate estimated bandwidth\n");
1403                         break;
1404                 }
1405
1406                 /*
1407                  * Estimated bandwidth includes:
1408                  *  - already allocated bandwidth for the DP tunnel
1409                  *  - available bandwidth along the path
1410                  *  - bandwidth allocated for USB 3.x but not used.
1411                  */
1412                 if (tb_tunnel_direction_downstream(tunnel))
1413                         estimated_bw = estimated_down;
1414                 else
1415                         estimated_bw = estimated_up;
1416
1417                 /*
1418                  * If there is reserved bandwidth for the group that is
1419                  * not yet released we report that too.
1420                  */
1421                 tb_tunnel_dbg(tunnel,
1422                               "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
1423                               estimated_bw, group->reserved,
1424                               estimated_bw + group->reserved);
1425
1426                 if (usb4_dp_port_set_estimated_bandwidth(in,
1427                                 estimated_bw + group->reserved))
1428                         tb_tunnel_warn(tunnel,
1429                                        "failed to update estimated bandwidth\n");
1430         }
1431
1432         if (first_tunnel)
1433                 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1434                                           first_tunnel->dst_port);
1435
1436         tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1437 }
1438
1439 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1440 {
1441         struct tb_cm *tcm = tb_priv(tb);
1442         int i;
1443
1444         tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1445
1446         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1447                 struct tb_bandwidth_group *group = &tcm->groups[i];
1448
1449                 if (!list_empty(&group->ports))
1450                         tb_recalc_estimated_bandwidth_for_group(group);
1451         }
1452
1453         tb_dbg(tb, "bandwidth re-calculation done\n");
1454 }
1455
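/*
 * Release any bandwidth the group still has reserved. Returns true if
 * there was reserved bandwidth to give back so that the caller knows to
 * re-calculate the estimated bandwidth.
 */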
1456 static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
1457 {
1458         if (group->reserved) {
1459                 tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
1460                         group->reserved);
1461                 group->reserved = 0;
1462                 return true;
1463         }
1464         return false;
1465 }
1466
1467 static void __configure_group_sym(struct tb_bandwidth_group *group)
1468 {
1469         struct tb_tunnel *tunnel;
1470         struct tb_port *in;
1471
1472         if (list_empty(&group->ports))
1473                 return;
1474
1475         /*
1476          * All the tunnels in the group go through the same USB4 links
1477          * so we find the first one here and pass the IN and OUT
1478          * adapters to tb_configure_sym() which now transitions the
1479          * links back to symmetric if bandwidth requirement < asym_threshold.
1480          *
1481          * We do this here to avoid unnecessary transitions (for example
1482                          * if the graphics driver released bandwidth for another tunnel in the
1483          * same group).
1484          */
1485         in = list_first_entry(&group->ports, struct tb_port, group_list);
1486         tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
1487         if (tunnel)
1488                 tb_configure_sym(group->tb, in, tunnel->dst_port, true);
1489 }
1490
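/*
 * Delayed work that gives the reserved bandwidth of a group back to the
 * domain and, when possible, transitions the links back to symmetric.
 */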
1491 static void tb_bandwidth_group_release_work(struct work_struct *work)
1492 {
1493         struct tb_bandwidth_group *group =
1494                 container_of(work, typeof(*group), release_work.work);
1495         struct tb *tb = group->tb;
1496
1497         mutex_lock(&tb->lock);
1498         if (__release_group_bandwidth(group))
1499                 tb_recalc_estimated_bandwidth(tb);
1500         __configure_group_sym(group);
1501         mutex_unlock(&tb->lock);
1502 }
1503
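/* Initialize the per-domain bandwidth groups. Group indices are 1-based. */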
1504 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
1505 {
1506         int i;
1507
1508         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1509                 struct tb_bandwidth_group *group = &tcm->groups[i];
1510
1511                 group->tb = tcm_to_tb(tcm);
1512                 group->index = i + 1;
1513                 INIT_LIST_HEAD(&group->ports);
1514                 INIT_DELAYED_WORK(&group->release_work,
1515                                   tb_bandwidth_group_release_work);
1516         }
1517 }
1518
1519 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
1520                                            struct tb_port *in)
1521 {
1522         if (!group || WARN_ON(in->group))
1523                 return;
1524
1525         in->group = group;
1526         list_add_tail(&in->group_list, &group->ports);
1527
1528         tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
1529 }
1530
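/*
 * Return the first bandwidth group with no ports attached, or NULL if
 * all the groups are already in use.
 */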
1531 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
1532 {
1533         int i;
1534
1535         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1536                 struct tb_bandwidth_group *group = &tcm->groups[i];
1537
1538                 if (list_empty(&group->ports))
1539                         return group;
1540         }
1541
1542         return NULL;
1543 }
1544
1545 static struct tb_bandwidth_group *
1546 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
1547                           struct tb_port *out)
1548 {
1549         struct tb_bandwidth_group *group;
1550         struct tb_tunnel *tunnel;
1551
1552         /*
1553          * Find all DP tunnels that go through all the same USB4 links
1554          * as this one. Because we always set up tunnels the same way we
1555          * can just check for the routers at both ends of the tunnels
1556          * and if they are the same we have a match.
1557          */
1558         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1559                 if (!tb_tunnel_is_dp(tunnel))
1560                         continue;
1561
1562                 if (tunnel->src_port->sw == in->sw &&
1563                     tunnel->dst_port->sw == out->sw) {
1564                         group = tunnel->src_port->group;
1565                         if (group) {
1566                                 tb_bandwidth_group_attach_port(group, in);
1567                                 return group;
1568                         }
1569                 }
1570         }
1571
1572         /* Pick up next available group then */
1573         group = tb_find_free_bandwidth_group(tcm);
1574         if (group)
1575                 tb_bandwidth_group_attach_port(group, in);
1576         else
1577                 tb_port_warn(in, "no available bandwidth groups\n");
1578
1579         return group;
1580 }
1581
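/*
 * If the DP IN adapter already has bandwidth allocation mode enabled,
 * attach it to the group matching the Group_ID programmed in the
 * adapter. Otherwise fall back to the normal group selection.
 */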
1582 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
1583                                         struct tb_port *out)
1584 {
1585         if (usb4_dp_port_bandwidth_mode_enabled(in)) {
1586                 int index, i;
1587
1588                 index = usb4_dp_port_group_id(in);
1589                 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1590                         if (tcm->groups[i].index == index) {
1591                                 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
1592                                 return;
1593                         }
1594                 }
1595         }
1596
1597         tb_attach_bandwidth_group(tcm, in, out);
1598 }
1599
1600 static void tb_detach_bandwidth_group(struct tb_port *in)
1601 {
1602         struct tb_bandwidth_group *group = in->group;
1603
1604         if (group) {
1605                 in->group = NULL;
1606                 list_del_init(&in->group_list);
1607
1608                 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
1609
1610                 /* No more tunnels so release the reserved bandwidth if any */
1611                 if (list_empty(&group->ports)) {
1612                         cancel_delayed_work(&group->release_work);
1613                         __release_group_bandwidth(group);
1614                 }
1615         }
1616 }
1617
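/*
 * Account for tunnels discovered during driver load: mark routers behind
 * PCIe tunnels as boot-configured and keep the domain runtime-resumed
 * while firmware-created DP tunnels exist.
 */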
1618 static void tb_discover_tunnels(struct tb *tb)
1619 {
1620         struct tb_cm *tcm = tb_priv(tb);
1621         struct tb_tunnel *tunnel;
1622
1623         tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
1624
1625         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1626                 if (tb_tunnel_is_pci(tunnel)) {
1627                         struct tb_switch *parent = tunnel->dst_port->sw;
1628
1629                         while (parent != tunnel->src_port->sw) {
1630                                 parent->boot = true;
1631                                 parent = tb_switch_parent(parent);
1632                         }
1633                 } else if (tb_tunnel_is_dp(tunnel)) {
1634                         struct tb_port *in = tunnel->src_port;
1635                         struct tb_port *out = tunnel->dst_port;
1636
1637                         /* Keep the domain from powering down */
1638                         pm_runtime_get_sync(&in->sw->dev);
1639                         pm_runtime_get_sync(&out->sw->dev);
1640
1641                         tb_discover_bandwidth_group(tcm, in, out);
1642                 }
1643         }
1644 }
1645
1646 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
1647 {
1648         struct tb_port *src_port, *dst_port;
1649         struct tb *tb;
1650
1651         if (!tunnel)
1652                 return;
1653
1654         tb_tunnel_deactivate(tunnel);
1655         list_del(&tunnel->list);
1656
1657         tb = tunnel->tb;
1658         src_port = tunnel->src_port;
1659         dst_port = tunnel->dst_port;
1660
1661         switch (tunnel->type) {
1662         case TB_TUNNEL_DP:
1663                 tb_detach_bandwidth_group(src_port);
1664                 /*
1665                  * In case of DP tunnel make sure the DP IN resource is
1666                  * deallocated properly.
1667                  */
1668                 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
1669                 /*
1670                  * If bandwidth on a link is < asym_threshold
1671                  * transition the link to symmetric.
1672                  */
1673                 tb_configure_sym(tb, src_port, dst_port, true);
1674                 /* Now we can allow the domain to runtime suspend again */
1675                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
1676                 pm_runtime_put_autosuspend(&dst_port->sw->dev);
1677                 pm_runtime_mark_last_busy(&src_port->sw->dev);
1678                 pm_runtime_put_autosuspend(&src_port->sw->dev);
1679                 fallthrough;
1680
1681         case TB_TUNNEL_USB3:
1682                 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1683                 break;
1684
1685         default:
1686                 /*
1687                  * PCIe and DMA tunnels do not consume guaranteed
1688                  * bandwidth.
1689                  */
1690                 break;
1691         }
1692
1693         tb_tunnel_free(tunnel);
1694 }
1695
1696 /*
1697  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
1698  */
1699 static void tb_free_invalid_tunnels(struct tb *tb)
1700 {
1701         struct tb_cm *tcm = tb_priv(tb);
1702         struct tb_tunnel *tunnel;
1703         struct tb_tunnel *n;
1704
1705         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1706                 if (tb_tunnel_is_invalid(tunnel))
1707                         tb_deactivate_and_free_tunnel(tunnel);
1708         }
1709 }
1710
1711 /*
1712  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
1713  */
1714 static void tb_free_unplugged_children(struct tb_switch *sw)
1715 {
1716         struct tb_port *port;
1717
1718         tb_switch_for_each_port(sw, port) {
1719                 if (!tb_port_has_remote(port))
1720                         continue;
1721
1722                 if (port->remote->sw->is_unplugged) {
1723                         tb_retimer_remove_all(port);
1724                         tb_remove_dp_resources(port->remote->sw);
1725                         tb_switch_unconfigure_link(port->remote->sw);
1726                         tb_switch_set_link_width(port->remote->sw,
1727                                                  TB_LINK_WIDTH_SINGLE);
1728                         tb_switch_remove(port->remote->sw);
1729                         port->remote = NULL;
1730                         if (port->dual_link_port)
1731                                 port->dual_link_port->remote = NULL;
1732                 } else {
1733                         tb_free_unplugged_children(port->remote->sw);
1734                 }
1735         }
1736 }
1737
1738 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
1739                                          const struct tb_port *port)
1740 {
1741         struct tb_port *down = NULL;
1742
1743         /*
1744          * To keep plugging devices consistently in the same PCIe
1745          * hierarchy, do mapping here for switch downstream PCIe ports.
1746          */
1747         if (tb_switch_is_usb4(sw)) {
1748                 down = usb4_switch_map_pcie_down(sw, port);
1749         } else if (!tb_route(sw)) {
1750                 int phy_port = tb_phy_port_from_link(port->port);
1751                 int index;
1752
1753                 /*
1754                  * Hard-coded Thunderbolt port to PCIe down port mapping
1755                  * per controller.
1756                  */
1757                 if (tb_switch_is_cactus_ridge(sw) ||
1758                     tb_switch_is_alpine_ridge(sw))
1759                         index = !phy_port ? 6 : 7;
1760                 else if (tb_switch_is_falcon_ridge(sw))
1761                         index = !phy_port ? 6 : 8;
1762                 else if (tb_switch_is_titan_ridge(sw))
1763                         index = !phy_port ? 8 : 9;
1764                 else
1765                         goto out;
1766
1767                 /* Validate the hard-coding */
1768                 if (WARN_ON(index > sw->config.max_port_number))
1769                         goto out;
1770
1771                 down = &sw->ports[index];
1772         }
1773
1774         if (down) {
1775                 if (WARN_ON(!tb_port_is_pcie_down(down)))
1776                         goto out;
1777                 if (tb_pci_port_is_enabled(down))
1778                         goto out;
1779
1780                 return down;
1781         }
1782
1783 out:
1784         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1785 }
1786
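/*
 * Find an inactive DP OUT adapter that can be paired with @in. If @in is
 * not on the host router the DP OUT must sit below the same host router
 * downstream port so the tunnel stays in that part of the topology.
 */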
1787 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1788 {
1789         struct tb_port *host_port, *port;
1790         struct tb_cm *tcm = tb_priv(tb);
1791
1792         host_port = tb_route(in->sw) ?
1793                 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1794
1795         list_for_each_entry(port, &tcm->dp_resources, list) {
1796                 if (!tb_port_is_dpout(port))
1797                         continue;
1798
1799                 if (tb_port_is_enabled(port)) {
1800                         tb_port_dbg(port, "DP OUT in use\n");
1801                         continue;
1802                 }
1803
1804                 tb_port_dbg(port, "DP OUT available\n");
1805
1806                 /*
1807                  * Keep the DP tunnel under the topology starting from
1808                  * the same host router downstream port.
1809                  */
1810                 if (host_port && tb_route(port->sw)) {
1811                         struct tb_port *p;
1812
1813                         p = tb_port_at(tb_route(port->sw), tb->root_switch);
1814                         if (p != host_port)
1815                                 continue;
1816                 }
1817
1818                 return port;
1819         }
1820
1821         return NULL;
1822 }
1823
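/*
 * Establish a single DP tunnel between @in and @out: allocate the DP IN
 * resource, attach a bandwidth group, temporarily release unused USB3
 * bandwidth for the calculation and activate the tunnel. Returns true
 * on success.
 */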
1824 static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
1825                              struct tb_port *out)
1826 {
1827         int available_up, available_down, ret, link_nr;
1828         struct tb_cm *tcm = tb_priv(tb);
1829         int consumed_up, consumed_down;
1830         struct tb_tunnel *tunnel;
1831
1832         /*
1833          * This is only applicable to links that are not bonded (so
1834          * when Thunderbolt 1 hardware is involved somewhere in the
1835          * topology). For these try to share the DP bandwidth between
1836          * the two lanes.
1837          */
1838         link_nr = 1;
1839         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1840                 if (tb_tunnel_is_dp(tunnel)) {
1841                         link_nr = 0;
1842                         break;
1843                 }
1844         }
1845
1846         /*
1847          * DP stream needs the domain to be active so runtime resume
1848          * both ends of the tunnel.
1849          *
1850          * This should bring the routers in the middle active as well
1851          * and keep the domain from runtime suspending while the DP
1852          * tunnel is active.
1853          */
1854         pm_runtime_get_sync(&in->sw->dev);
1855         pm_runtime_get_sync(&out->sw->dev);
1856
1857         if (tb_switch_alloc_dp_resource(in->sw, in)) {
1858                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
1859                 goto err_rpm_put;
1860         }
1861
1862         if (!tb_attach_bandwidth_group(tcm, in, out))
1863                 goto err_dealloc_dp;
1864
1865         /* Make all unused USB3 bandwidth available for the new DP tunnel */
1866         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1867         if (ret) {
1868                 tb_warn(tb, "failed to release unused bandwidth\n");
1869                 goto err_detach_group;
1870         }
1871
1872         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
1873                                      true);
1874         if (ret)
1875                 goto err_reclaim_usb;
1876
1877         tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1878                available_up, available_down);
1879
1880         tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1881                                     available_down);
1882         if (!tunnel) {
1883                 tb_port_dbg(out, "could not allocate DP tunnel\n");
1884                 goto err_reclaim_usb;
1885         }
1886
1887         if (tb_tunnel_activate(tunnel)) {
1888                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1889                 goto err_free;
1890         }
1891
1892                 /* If reading the tunnel's consumed bandwidth fails, tear it down */
1893         ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
1894         if (ret)
1895                 goto err_deactivate;
1896
1897         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1898
1899         tb_reclaim_usb3_bandwidth(tb, in, out);
1900         /*
1901          * Transition the links to asymmetric if the consumption exceeds
1902          * the threshold.
1903          */
1904         tb_configure_asym(tb, in, out, consumed_up, consumed_down);
1905
1906         /* Update the domain with the new bandwidth estimation */
1907         tb_recalc_estimated_bandwidth(tb);
1908
1909         /*
1910          * Now that a DP tunnel exists, change the TMU mode of the host
1911          * router's first-depth children to HiFi so that CL0s works.
1912          */
1913         tb_increase_tmu_accuracy(tunnel);
1914         return true;
1915
1916 err_deactivate:
1917         tb_tunnel_deactivate(tunnel);
1918 err_free:
1919         tb_tunnel_free(tunnel);
1920 err_reclaim_usb:
1921         tb_reclaim_usb3_bandwidth(tb, in, out);
1922 err_detach_group:
1923         tb_detach_bandwidth_group(in);
1924 err_dealloc_dp:
1925         tb_switch_dealloc_dp_resource(in->sw, in);
1926 err_rpm_put:
1927         pm_runtime_mark_last_busy(&out->sw->dev);
1928         pm_runtime_put_autosuspend(&out->sw->dev);
1929         pm_runtime_mark_last_busy(&in->sw->dev);
1930         pm_runtime_put_autosuspend(&in->sw->dev);
1931
1932         return false;
1933 }
1934
1935 static void tb_tunnel_dp(struct tb *tb)
1936 {
1937         struct tb_cm *tcm = tb_priv(tb);
1938         struct tb_port *port, *in, *out;
1939
1940         if (!tb_acpi_may_tunnel_dp()) {
1941                 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1942                 return;
1943         }
1944
1945         /*
1946          * Find pair of inactive DP IN and DP OUT adapters and then
1947          * establish a DP tunnel between them.
1948          */
1949         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1950
1951         in = NULL;
1952         out = NULL;
1953         list_for_each_entry(port, &tcm->dp_resources, list) {
1954                 if (!tb_port_is_dpin(port))
1955                         continue;
1956
1957                 if (tb_port_is_enabled(port)) {
1958                         tb_port_dbg(port, "DP IN in use\n");
1959                         continue;
1960                 }
1961
1962                 in = port;
1963                 tb_port_dbg(in, "DP IN available\n");
1964
1965                 out = tb_find_dp_out(tb, port);
1966                 if (out)
1967                         tb_tunnel_one_dp(tb, in, out);
1968                 else
1969                         tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
1970         }
1971
1972         if (!in)
1973                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1974 }
1975
1976 static void tb_enter_redrive(struct tb_port *port)
1977 {
1978         struct tb_switch *sw = port->sw;
1979
1980         if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
1981                 return;
1982
1983         /*
1984          * If we get hot-unplug for the DP IN port of the host router
1985          * and the DP resource is not available anymore it means there
1986          * is a monitor connected directly to the Type-C port and we are
1987          * in "redrive" mode. For this to work we cannot enter RTD3 so
1988          * we bump up the runtime PM reference count here.
1989          */
1990         if (!tb_port_is_dpin(port))
1991                 return;
1992         if (tb_route(sw))
1993                 return;
1994         if (!tb_switch_query_dp_resource(sw, port)) {
1995                 port->redrive = true;
1996                 pm_runtime_get(&sw->dev);
1997                 tb_port_dbg(port, "enter redrive mode, keeping powered\n");
1998         }
1999 }
2000
2001 static void tb_exit_redrive(struct tb_port *port)
2002 {
2003         struct tb_switch *sw = port->sw;
2004
2005         if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
2006                 return;
2007
2008         if (!tb_port_is_dpin(port))
2009                 return;
2010         if (tb_route(sw))
2011                 return;
2012         if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
2013                 port->redrive = false;
2014                 pm_runtime_put(&sw->dev);
2015                 tb_port_dbg(port, "exit redrive mode\n");
2016         }
2017 }
2018
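/*
 * A DP IN/OUT resource went away: tear down the tunnel using it (or
 * enter redrive mode if there was no tunnel) and then see if another
 * DP tunnel can be created from the remaining resources.
 */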
2019 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
2020 {
2021         struct tb_port *in, *out;
2022         struct tb_tunnel *tunnel;
2023
2024         if (tb_port_is_dpin(port)) {
2025                 tb_port_dbg(port, "DP IN resource unavailable\n");
2026                 in = port;
2027                 out = NULL;
2028         } else {
2029                 tb_port_dbg(port, "DP OUT resource unavailable\n");
2030                 in = NULL;
2031                 out = port;
2032         }
2033
2034         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
2035         if (tunnel)
2036                 tb_deactivate_and_free_tunnel(tunnel);
2037         else
2038                 tb_enter_redrive(port);
2039         list_del_init(&port->list);
2040
2041         /*
2042          * See if there is another DP OUT port that can be used to
2043          * create another tunnel.
2044          */
2045         tb_recalc_estimated_bandwidth(tb);
2046         tb_tunnel_dp(tb);
2047 }
2048
2049 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
2050 {
2051         struct tb_cm *tcm = tb_priv(tb);
2052         struct tb_port *p;
2053
2054         if (tb_port_is_enabled(port))
2055                 return;
2056
2057         list_for_each_entry(p, &tcm->dp_resources, list) {
2058                 if (p == port)
2059                         return;
2060         }
2061
2062         tb_port_dbg(port, "DP %s resource available after hotplug\n",
2063                     tb_port_is_dpin(port) ? "IN" : "OUT");
2064         list_add_tail(&port->list, &tcm->dp_resources);
2065         tb_exit_redrive(port);
2066
2067         /* Look for suitable DP IN <-> DP OUT pairs now */
2068         tb_tunnel_dp(tb);
2069 }
2070
2071 static void tb_disconnect_and_release_dp(struct tb *tb)
2072 {
2073         struct tb_cm *tcm = tb_priv(tb);
2074         struct tb_tunnel *tunnel, *n;
2075
2076         /*
2077          * Tear down all DP tunnels and release their resources. They
2078          * will be re-established after resume based on plug events.
2079          */
2080         list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
2081                 if (tb_tunnel_is_dp(tunnel))
2082                         tb_deactivate_and_free_tunnel(tunnel);
2083         }
2084
2085         while (!list_empty(&tcm->dp_resources)) {
2086                 struct tb_port *port;
2087
2088                 port = list_first_entry(&tcm->dp_resources,
2089                                         struct tb_port, list);
2090                 list_del_init(&port->list);
2091         }
2092 }
2093
2094 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
2095 {
2096         struct tb_tunnel *tunnel;
2097         struct tb_port *up;
2098
2099         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2100         if (WARN_ON(!up))
2101                 return -ENODEV;
2102
2103         tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
2104         if (WARN_ON(!tunnel))
2105                 return -ENODEV;
2106
2107         tb_switch_xhci_disconnect(sw);
2108
2109         tb_tunnel_deactivate(tunnel);
2110         list_del(&tunnel->list);
2111         tb_tunnel_free(tunnel);
2112         return 0;
2113 }
2114
2115 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
2116 {
2117         struct tb_port *up, *down, *port;
2118         struct tb_cm *tcm = tb_priv(tb);
2119         struct tb_tunnel *tunnel;
2120
2121         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2122         if (!up)
2123                 return 0;
2124
2125         /*
2126          * Look up an available down port. Since we are chaining, it should
2127          * be found right above this switch.
2128          */
2129         port = tb_switch_downstream_port(sw);
2130         down = tb_find_pcie_down(tb_switch_parent(sw), port);
2131         if (!down)
2132                 return 0;
2133
2134         tunnel = tb_tunnel_alloc_pci(tb, up, down);
2135         if (!tunnel)
2136                 return -ENOMEM;
2137
2138         if (tb_tunnel_activate(tunnel)) {
2139                 tb_port_info(up,
2140                              "PCIe tunnel activation failed, aborting\n");
2141                 tb_tunnel_free(tunnel);
2142                 return -EIO;
2143         }
2144
2145         /*
2146          * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
2147          * here.
2148          */
2149         if (tb_switch_pcie_l1_enable(sw))
2150                 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
2151
2152         if (tb_switch_xhci_connect(sw))
2153                 tb_sw_warn(sw, "failed to connect xHCI\n");
2154
2155         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2156         return 0;
2157 }
2158
2159 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2160                                     int transmit_path, int transmit_ring,
2161                                     int receive_path, int receive_ring)
2162 {
2163         struct tb_cm *tcm = tb_priv(tb);
2164         struct tb_port *nhi_port, *dst_port;
2165         struct tb_tunnel *tunnel;
2166         struct tb_switch *sw;
2167         int ret;
2168
2169         sw = tb_to_switch(xd->dev.parent);
2170         dst_port = tb_port_at(xd->route, sw);
2171         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2172
2173         mutex_lock(&tb->lock);
2174
2175         /*
2176          * When tunneling DMA paths the link should not enter CL states
2177          * so disable them now.
2178          */
2179         tb_disable_clx(sw);
2180
2181         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
2182                                      transmit_ring, receive_path, receive_ring);
2183         if (!tunnel) {
2184                 ret = -ENOMEM;
2185                 goto err_clx;
2186         }
2187
2188         if (tb_tunnel_activate(tunnel)) {
2189                 tb_port_info(nhi_port,
2190                              "DMA tunnel activation failed, aborting\n");
2191                 ret = -EIO;
2192                 goto err_free;
2193         }
2194
2195         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2196         mutex_unlock(&tb->lock);
2197         return 0;
2198
2199 err_free:
2200         tb_tunnel_free(tunnel);
2201 err_clx:
2202         tb_enable_clx(sw);
2203         mutex_unlock(&tb->lock);
2204
2205         return ret;
2206 }
2207
2208 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2209                                           int transmit_path, int transmit_ring,
2210                                           int receive_path, int receive_ring)
2211 {
2212         struct tb_cm *tcm = tb_priv(tb);
2213         struct tb_port *nhi_port, *dst_port;
2214         struct tb_tunnel *tunnel, *n;
2215         struct tb_switch *sw;
2216
2217         sw = tb_to_switch(xd->dev.parent);
2218         dst_port = tb_port_at(xd->route, sw);
2219         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2220
2221         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2222                 if (!tb_tunnel_is_dma(tunnel))
2223                         continue;
2224                 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
2225                         continue;
2226
2227                 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
2228                                         receive_path, receive_ring))
2229                         tb_deactivate_and_free_tunnel(tunnel);
2230         }
2231
2232         /*
2233          * Try to re-enable CL states now; it is OK if this fails
2234          * because we may still have another DMA tunnel active through
2235          * the same host router USB4 downstream port.
2236          */
2237         tb_enable_clx(sw);
2238 }
2239
2240 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2241                                        int transmit_path, int transmit_ring,
2242                                        int receive_path, int receive_ring)
2243 {
2244         if (!xd->is_unplugged) {
2245                 mutex_lock(&tb->lock);
2246                 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
2247                                               transmit_ring, receive_path,
2248                                               receive_ring);
2249                 mutex_unlock(&tb->lock);
2250         }
2251         return 0;
2252 }
2253
2254 /* hotplug handling */
2255
2256 /*
2257  * tb_handle_hotplug() - handle hotplug event
2258  *
2259  * Executes on tb->wq.
2260  */
2261 static void tb_handle_hotplug(struct work_struct *work)
2262 {
2263         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2264         struct tb *tb = ev->tb;
2265         struct tb_cm *tcm = tb_priv(tb);
2266         struct tb_switch *sw;
2267         struct tb_port *port;
2268
2269         /* Bring the domain back from sleep if it was suspended */
2270         pm_runtime_get_sync(&tb->dev);
2271
2272         mutex_lock(&tb->lock);
2273         if (!tcm->hotplug_active)
2274                 goto out; /* during init, suspend or shutdown */
2275
2276         sw = tb_switch_find_by_route(tb, ev->route);
2277         if (!sw) {
2278                 tb_warn(tb,
2279                         "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
2280                         ev->route, ev->port, ev->unplug);
2281                 goto out;
2282         }
2283         if (ev->port > sw->config.max_port_number) {
2284                 tb_warn(tb,
2285                         "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
2286                         ev->route, ev->port, ev->unplug);
2287                 goto put_sw;
2288         }
2289         port = &sw->ports[ev->port];
2290         if (tb_is_upstream_port(port)) {
2291                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
2292                        ev->route, ev->port, ev->unplug);
2293                 goto put_sw;
2294         }
2295
2296         pm_runtime_get_sync(&sw->dev);
2297
2298         if (ev->unplug) {
2299                 tb_retimer_remove_all(port);
2300
2301                 if (tb_port_has_remote(port)) {
2302                         tb_port_dbg(port, "switch unplugged\n");
2303                         tb_sw_set_unplugged(port->remote->sw);
2304                         tb_free_invalid_tunnels(tb);
2305                         tb_remove_dp_resources(port->remote->sw);
2306                         tb_switch_tmu_disable(port->remote->sw);
2307                         tb_switch_unconfigure_link(port->remote->sw);
2308                         tb_switch_set_link_width(port->remote->sw,
2309                                                  TB_LINK_WIDTH_SINGLE);
2310                         tb_switch_remove(port->remote->sw);
2311                         port->remote = NULL;
2312                         if (port->dual_link_port)
2313                                 port->dual_link_port->remote = NULL;
2314                         /* Maybe we can create another DP tunnel */
2315                         tb_recalc_estimated_bandwidth(tb);
2316                         tb_tunnel_dp(tb);
2317                 } else if (port->xdomain) {
2318                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
2319
2320                         tb_port_dbg(port, "xdomain unplugged\n");
2321                         /*
2322                          * Service drivers are unbound during
2323                          * tb_xdomain_remove() so setting XDomain as
2324                          * unplugged here prevents deadlock if they call
2325                          * tb_xdomain_disable_paths(). We will tear down
2326                          * all the tunnels below.
2327                          */
2328                         xd->is_unplugged = true;
2329                         tb_xdomain_remove(xd);
2330                         port->xdomain = NULL;
2331                         __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
2332                         tb_xdomain_put(xd);
2333                         tb_port_unconfigure_xdomain(port);
2334                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2335                         tb_dp_resource_unavailable(tb, port);
2336                 } else if (!port->port) {
2337                         tb_sw_dbg(sw, "xHCI disconnect request\n");
2338                         tb_switch_xhci_disconnect(sw);
2339                 } else {
2340                         tb_port_dbg(port,
2341                                    "got unplug event for disconnected port, ignoring\n");
2342                 }
2343         } else if (port->remote) {
2344                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
2345         } else if (!port->port && sw->authorized) {
2346                 tb_sw_dbg(sw, "xHCI connect request\n");
2347                 tb_switch_xhci_connect(sw);
2348         } else {
2349                 if (tb_port_is_null(port)) {
2350                         tb_port_dbg(port, "hotplug: scanning\n");
2351                         tb_scan_port(port);
2352                         if (!port->remote)
2353                                 tb_port_dbg(port, "hotplug: no switch found\n");
2354                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2355                         tb_dp_resource_available(tb, port);
2356                 }
2357         }
2358
2359         pm_runtime_mark_last_busy(&sw->dev);
2360         pm_runtime_put_autosuspend(&sw->dev);
2361
2362 put_sw:
2363         tb_switch_put(sw);
2364 out:
2365         mutex_unlock(&tb->lock);
2366
2367         pm_runtime_mark_last_busy(&tb->dev);
2368         pm_runtime_put_autosuspend(&tb->dev);
2369
2370         kfree(ev);
2371 }
2372
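/*
 * Handle a bandwidth allocation request from the DP IN adapter of
 * @tunnel. A negative @requested_up or @requested_down means that
 * direction is not used. Returns 0 on success, negative errno otherwise.
 */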
2373 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
2374                                  int *requested_down)
2375 {
2376         int allocated_up, allocated_down, available_up, available_down, ret;
2377         int requested_up_corrected, requested_down_corrected, granularity;
2378         int max_up, max_down, max_up_rounded, max_down_rounded;
2379         struct tb_bandwidth_group *group;
2380         struct tb *tb = tunnel->tb;
2381         struct tb_port *in, *out;
2382         bool downstream;
2383
2384         ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
2385         if (ret)
2386                 return ret;
2387
2388         in = tunnel->src_port;
2389         out = tunnel->dst_port;
2390
2391         tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
2392                       allocated_up, allocated_down);
2393
2394         /*
2395          * If we get a rounded-up request from the graphics side, say HBR2 x 4
2396          * that is 17500 instead of 17280 (this is because of the
2397          * granularity), we allow it too. Here the graphics side has already
2398          * negotiated with the DPRX the maximum possible rates (which is
2399          * 17280 in this case).
2400          *
2401          * Since the link cannot go higher than 17280 we use that in our
2402          * calculations but the DP IN adapter Allocated BW write must be
2403          * the same value (17500) otherwise the adapter will mark it as
2404          * failed for graphics.
2405          */
2406         ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
2407         if (ret)
2408                 goto fail;
2409
2410         ret = usb4_dp_port_granularity(in);
2411         if (ret < 0)
2412                 goto fail;
2413         granularity = ret;
2414
2415         max_up_rounded = roundup(max_up, granularity);
2416         max_down_rounded = roundup(max_down, granularity);
2417
2418         /*
2419          * This will "fix" the request down to the maximum supported
2420          * rate * lanes if it is at the maximum rounded up level.
2421          */
2422         requested_up_corrected = *requested_up;
2423         if (requested_up_corrected == max_up_rounded)
2424                 requested_up_corrected = max_up;
2425         else if (requested_up_corrected < 0)
2426                 requested_up_corrected = 0;
2427         requested_down_corrected = *requested_down;
2428         if (requested_down_corrected == max_down_rounded)
2429                 requested_down_corrected = max_down;
2430         else if (requested_down_corrected < 0)
2431                 requested_down_corrected = 0;
2432
2433         tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
2434                       requested_up_corrected, requested_down_corrected);
2435
2436         if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
2437             (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
2438                 tb_tunnel_dbg(tunnel,
2439                               "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
2440                               requested_up_corrected, requested_down_corrected,
2441                               max_up_rounded, max_down_rounded);
2442                 ret = -ENOBUFS;
2443                 goto fail;
2444         }
2445
2446         downstream = tb_tunnel_direction_downstream(tunnel);
2447         group = in->group;
2448
2449         if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
2450             (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
2451                 if (tunnel->bw_mode) {
2452                         int reserved;
2453                         /*
2454                          * If the requested bandwidth is less than or equal to
2455                          * what is currently allocated to that tunnel we
2456                          * simply change the reservation of the tunnel
2457                          * and add the released bandwidth for the group
2458                          * for the next 10s. Then we release it for
2459                          * others to use.
2460                          */
2461                         if (downstream)
2462                                 reserved = allocated_down - *requested_down;
2463                         else
2464                                 reserved = allocated_up - *requested_up;
2465
2466                         if (reserved > 0) {
2467                                 group->reserved += reserved;
2468                                 tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
2469                                        group->index, reserved, group->reserved);
2470
2471                                 /*
2472                                  * If it was not already pending,
2473                                  * schedule the release now. If it is, then
2474                                  * postpone it for another 10s (unless it
2475                                  * is already running, in which case the
2476                                  * 10s already expired and we should give
2477                                  * the reserved bandwidth back to others).
2478                                  */
2479                                 mod_delayed_work(system_wq, &group->release_work,
2480                                         msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
2481                         }
2482                 }
2483
2484                 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2485                                                  requested_down);
2486         }
2487
2488         /*
2489          * More bandwidth is requested. Release all the potential
2490          * bandwidth from USB3 first.
2491          */
2492         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
2493         if (ret)
2494                 goto fail;
2495
2496         /*
2497          * Then go over all tunnels that cross the same USB4 ports (they
2498          * are also in the same group but we use the same function here
2499          * that we use with the normal bandwidth allocation).
2500          */
2501         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
2502                                      true);
2503         if (ret)
2504                 goto reclaim;
2505
2506         tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
2507                       available_up, available_down, group->reserved);
2508
2509         if ((*requested_up >= 0 &&
2510                 available_up + group->reserved >= requested_up_corrected) ||
2511             (*requested_down >= 0 &&
2512                 available_down + group->reserved >= requested_down_corrected)) {
2513                 int released = 0;
2514
2515                 /*
2516                  * If bandwidth on a link is >= asym_threshold
2517                  * transition the link to asymmetric.
2518                  */
2519                 ret = tb_configure_asym(tb, in, out, *requested_up,
2520                                         *requested_down);
2521                 if (ret) {
2522                         tb_configure_sym(tb, in, out, true);
2523                         goto fail;
2524                 }
2525
2526                 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2527                                                 requested_down);
2528                 if (ret) {
2529                         tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
2530                         tb_configure_sym(tb, in, out, true);
2531                 }
2532
2533                 if (downstream) {
2534                         if (*requested_down > available_down)
2535                                 released = *requested_down - available_down;
2536                 } else {
2537                         if (*requested_up > available_up)
2538                                 released = *requested_up - available_up;
2539                 }
2540                 if (released) {
2541                         group->reserved -= released;
2542                         tb_dbg(tb, "group %d released %d total %d Mb/s\n",
2543                                group->index, released, group->reserved);
2544                 }
2545         } else {
2546                 ret = -ENOBUFS;
2547         }
2548
2549 reclaim:
2550         tb_reclaim_usb3_bandwidth(tb, in, out);
2551 fail:
2552         if (ret && ret != -ENODEV) {
2553                 /*
2554                  * Write back the same allocated bandwidth (so no change);
2555                  * this makes the DPTX request fail on the graphics side.
2556                  */
2557                 tb_tunnel_dbg(tunnel,
2558                               "failing the request by rewriting allocated %d/%d Mb/s\n",
2559                               allocated_up, allocated_down);
2560                 tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
2561         }
2562
2563         return ret;
2564 }
2565
2566 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
2567 {
2568         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2569         int requested_bw, requested_up, requested_down, ret;
2570         struct tb_tunnel *tunnel;
2571         struct tb *tb = ev->tb;
2572         struct tb_cm *tcm = tb_priv(tb);
2573         struct tb_switch *sw;
2574         struct tb_port *in;
2575
2576         pm_runtime_get_sync(&tb->dev);
2577
2578         mutex_lock(&tb->lock);
2579         if (!tcm->hotplug_active)
2580                 goto unlock;
2581
2582         sw = tb_switch_find_by_route(tb, ev->route);
2583         if (!sw) {
2584                 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
2585                         ev->route);
2586                 goto unlock;
2587         }
2588
2589         in = &sw->ports[ev->port];
2590         if (!tb_port_is_dpin(in)) {
2591                 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
2592                 goto put_sw;
2593         }
2594
2595         tb_port_dbg(in, "handling bandwidth allocation request\n");
2596
2597         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
2598         if (!tunnel) {
2599                 tb_port_warn(in, "failed to find tunnel\n");
2600                 goto put_sw;
2601         }
2602
2603         if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
2604                 if (tunnel->bw_mode) {
2605                         /*
2606                          * Reset the tunnel back to use the legacy
2607                          * allocation.
2608                          */
2609                         tunnel->bw_mode = false;
2610                         tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
2611                 } else {
2612                         tb_port_warn(in, "bandwidth allocation mode not enabled\n");
2613                 }
2614                 goto put_sw;
2615         }
2616
2617         ret = usb4_dp_port_requested_bandwidth(in);
2618         if (ret < 0) {
2619                 if (ret == -ENODATA) {
2620                         /*
2621                          * There is no active request so this means the
2622                          * BW allocation mode was enabled from the graphics
2623                          * side. At this point we know that the graphics
2624                          * driver has read the DPRX capabilities so we
2625                          * can offer a better bandwidth estimation.
2626                          */
2627                         tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
2628                         tb_recalc_estimated_bandwidth(tb);
2629                 } else {
2630                         tb_port_warn(in, "failed to read requested bandwidth\n");
2631                 }
2632                 goto put_sw;
2633         }
2634         requested_bw = ret;
2635
2636         tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
2637
2638         if (tb_tunnel_direction_downstream(tunnel)) {
2639                 requested_up = -1;
2640                 requested_down = requested_bw;
2641         } else {
2642                 requested_up = requested_bw;
2643                 requested_down = -1;
2644         }
2645
2646         ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
2647         if (ret) {
2648                 if (ret == -ENOBUFS)
2649                         tb_tunnel_warn(tunnel,
2650                                        "not enough bandwidth available\n");
2651                 else
2652                         tb_tunnel_warn(tunnel,
2653                                        "failed to change bandwidth allocation\n");
2654         } else {
2655                 tb_tunnel_dbg(tunnel,
2656                               "bandwidth allocation changed to %d/%d Mb/s\n",
2657                               requested_up, requested_down);
2658
2659                 /* Update other clients about the allocation change */
2660                 tb_recalc_estimated_bandwidth(tb);
2661         }
2662
2663 put_sw:
2664         tb_switch_put(sw);
2665 unlock:
2666         mutex_unlock(&tb->lock);
2667
2668         pm_runtime_mark_last_busy(&tb->dev);
2669         pm_runtime_put_autosuspend(&tb->dev);
2670
2671         kfree(ev);
2672 }
2673
2674 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
2675 {
2676         struct tb_hotplug_event *ev;
2677
2678         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
2679         if (!ev)
2680                 return;
2681
2682         ev->tb = tb;
2683         ev->route = route;
2684         ev->port = port;
2685         INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
2686         queue_work(tb->wq, &ev->work);
2687 }
2688
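/*
 * Acknowledge notifications sent by the routers and queue DP bandwidth
 * allocation requests for processing.
 */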
2689 static void tb_handle_notification(struct tb *tb, u64 route,
2690                                    const struct cfg_error_pkg *error)
2691 {
2692
2693         switch (error->error) {
2694         case TB_CFG_ERROR_PCIE_WAKE:
2695         case TB_CFG_ERROR_DP_CON_CHANGE:
2696         case TB_CFG_ERROR_DPTX_DISCOVERY:
2697                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2698                         tb_warn(tb, "could not ack notification on %llx\n",
2699                                 route);
2700                 break;
2701
2702         case TB_CFG_ERROR_DP_BW:
2703                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2704                         tb_warn(tb, "could not ack notification on %llx\n",
2705                                 route);
2706                 tb_queue_dp_bandwidth_request(tb, route, error->port);
2707                 break;
2708
2709         default:
2710                 /* Ignore for now */
2711                 break;
2712         }
2713 }
2714
2715 /*
2716  * tb_handle_event() - callback function for the control channel
2717  *
2718  * Delegates hotplug events to tb_handle_hotplug.
2719  */
2720 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
2721                             const void *buf, size_t size)
2722 {
2723         const struct cfg_event_pkg *pkg = buf;
2724         u64 route = tb_cfg_get_route(&pkg->header);
2725
2726         switch (type) {
2727         case TB_CFG_PKG_ERROR:
2728                 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
2729                 return;
2730         case TB_CFG_PKG_EVENT:
2731                 break;
2732         default:
2733                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
2734                 return;
2735         }
2736
2737         if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
2738                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
2739                         pkg->port);
2740         }
2741
2742         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2743 }
2744
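/*
 * Stop the domain: tear down DMA tunnels, free the remaining tunnel
 * structures and remove the root switch.
 */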
2745 static void tb_stop(struct tb *tb)
2746 {
2747         struct tb_cm *tcm = tb_priv(tb);
2748         struct tb_tunnel *tunnel;
2749         struct tb_tunnel *n;
2750
2751         cancel_delayed_work(&tcm->remove_work);
2752         /* tunnels are only present after everything has been initialized */
2753         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2754                 /*
2755                  * DMA tunnels require the driver to be functional so we
2756                  * tear them down. Other protocol tunnels can be left
2757                  * intact.
2758                  */
2759                 if (tb_tunnel_is_dma(tunnel))
2760                         tb_tunnel_deactivate(tunnel);
2761                 tb_tunnel_free(tunnel);
2762         }
2763         tb_switch_remove(tb->root_switch);
2764         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2765 }
2766
2767 static void tb_deinit(struct tb *tb)
2768 {
2769         struct tb_cm *tcm = tb_priv(tb);
2770         int i;
2771
2772         /* Cancel all the release bandwidth workers */
2773         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
2774                 cancel_delayed_work_sync(&tcm->groups[i].release_work);
2775 }
2776
2777 static int tb_scan_finalize_switch(struct device *dev, void *data)
2778 {
2779         if (tb_is_switch(dev)) {
2780                 struct tb_switch *sw = tb_to_switch(dev);
2781
2782                 /*
2783                  * If we found that the switch was already setup by the
2784                  * boot firmware, mark it as authorized now before we
2785                  * send uevent to userspace.
2786                  */
2787                 if (sw->boot)
2788                         sw->authorized = 1;
2789
2790                 dev_set_uevent_suppress(dev, false);
2791                 kobject_uevent(&dev->kobj, KOBJ_ADD);
2792                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
2793         }
2794
2795         return 0;
2796 }
2797
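/*
 * Start the domain: set up the root switch (host router), discover the
 * existing topology and tunnels, and enable hotplug handling.
 */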
2798 static int tb_start(struct tb *tb, bool reset)
2799 {
2800         struct tb_cm *tcm = tb_priv(tb);
2801         bool discover = true;
2802         int ret;
2803
2804         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2805         if (IS_ERR(tb->root_switch))
2806                 return PTR_ERR(tb->root_switch);
2807
2808         /*
2809          * ICM firmware upgrade needs a running ICM firmware, which is
2810          * not available in native mode, so disable firmware upgrade of
2811          * the root switch.
2812          *
2813          * However, USB4 routers support NVM firmware upgrade if they
2814          * implement the necessary router operations.
2815          */
2816         tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
2817         /* All USB4 routers support runtime PM */
2818         tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2819
2820         ret = tb_switch_configure(tb->root_switch);
2821         if (ret) {
2822                 tb_switch_put(tb->root_switch);
2823                 return ret;
2824         }
2825
2826         /* Announce the switch to the world */
2827         ret = tb_switch_add(tb->root_switch);
2828         if (ret) {
2829                 tb_switch_put(tb->root_switch);
2830                 return ret;
2831         }
2832
2833         /*
2834          * To support highest CLx state, we set host router's TMU to
2835          * Normal mode.
2836          */
2837         tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2838         /* Enable TMU if it is off */
2839         tb_switch_tmu_enable(tb->root_switch);
2840
2841         /*
2842          * Boot firmware might have created tunnels of its own. Since we
2843          * cannot be sure they are usable for us, tear them down and
2844          * reset the ports to handle it as new hotplug for USB4 v1
2845          * routers (for USB4 v2 and beyond we already do host reset).
2846          */
2847         if (reset && tb_switch_is_usb4(tb->root_switch)) {
2848                 discover = false;
2849                 if (usb4_switch_version(tb->root_switch) == 1)
2850                         tb_switch_reset(tb->root_switch);
2851         }
2852
2853         if (discover) {
2854                 /* Full scan to discover devices added before the driver was loaded. */
2855                 tb_scan_switch(tb->root_switch);
2856                 /* Find out tunnels created by the boot firmware */
2857                 tb_discover_tunnels(tb);
2858                 /* Add DP resources from the DP tunnels created by the boot firmware */
2859                 tb_discover_dp_resources(tb);
2860         }
2861
2862         /*
2863          * If the boot firmware did not create USB 3.x tunnels, create
2864          * them now for the whole topology.
2865          */
2866         tb_create_usb3_tunnels(tb->root_switch);
2867         /* Add DP IN resources for the root switch */
2868         tb_add_dp_resources(tb->root_switch);
2869         /* Make the discovered switches available to userspace */
2870         device_for_each_child(&tb->root_switch->dev, NULL,
2871                               tb_scan_finalize_switch);
2872
2873         /* Allow tb_handle_hotplug to progress events */
2874         tcm->hotplug_active = true;
2875         return 0;
2876 }
2877
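/*
 * tb_suspend_noirq() - Put the domain to sleep
 *
 * Tears down the DP tunnels, suspends all routers and stops hotplug
 * event handling until tb_resume_noirq() re-enables it.
 */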
2878 static int tb_suspend_noirq(struct tb *tb)
2879 {
2880         struct tb_cm *tcm = tb_priv(tb);
2881
2882         tb_dbg(tb, "suspending...\n");
2883         tb_disconnect_and_release_dp(tb);
2884         tb_switch_suspend(tb->root_switch, false);
2885         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2886         tb_dbg(tb, "suspend finished\n");
2887
2888         return 0;
2889 }
2890
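/*
 * Re-enable CL states, TMU and link configuration for every router
 * below @sw after sleep. Recurses through all connected routers and
 * re-configures XDomain ports as well.
 */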
2891 static void tb_restore_children(struct tb_switch *sw)
2892 {
2893         struct tb_port *port;
2894
2895         /* No need to restore if the router is already unplugged */
2896         if (sw->is_unplugged)
2897                 return;
2898
2899         if (tb_enable_clx(sw))
2900                 tb_sw_warn(sw, "failed to re-enable CL states\n");
2901
2902         if (tb_enable_tmu(sw))
2903                 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2904
2905         tb_switch_configuration_valid(sw);
2906
2907         tb_switch_for_each_port(sw, port) {
2908                 if (!tb_port_has_remote(port) && !port->xdomain)
2909                         continue;
2910
2911                 if (port->remote) {
2912                         tb_switch_set_link_width(port->remote->sw,
2913                                                  port->remote->sw->link_width);
2914                         tb_switch_configure_link(port->remote->sw);
2915
2916                         tb_restore_children(port->remote->sw);
2917                 } else if (port->xdomain) {
2918                         tb_port_configure_xdomain(port, port->xdomain);
2919                 }
2920         }
2921 }
2922
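/*
 * tb_resume_noirq() - Resume the domain after system sleep
 *
 * Resumes the routers, drops tunnels that became invalid, tears down
 * any tunnels created while we were asleep and then restarts the
 * tunnels we had set up before suspend.
 */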
2923 static int tb_resume_noirq(struct tb *tb)
2924 {
2925         struct tb_cm *tcm = tb_priv(tb);
2926         struct tb_tunnel *tunnel, *n;
2927         unsigned int usb3_delay = 0;
2928         LIST_HEAD(tunnels);
2929
2930         tb_dbg(tb, "resuming...\n");
2931
2932         /*
2933          * For non-USB4 hosts (Apple systems) remove any PCIe devices
2934          * the firmware might have set up.
2935          */
2936         if (!tb_switch_is_usb4(tb->root_switch))
2937                 tb_switch_reset(tb->root_switch);
2938
2939         tb_switch_resume(tb->root_switch);
2940         tb_free_invalid_tunnels(tb);
2941         tb_free_unplugged_children(tb->root_switch);
2942         tb_restore_children(tb->root_switch);
2943
2944         /*
2945          * If we get here from suspend to disk, the boot firmware or the
2946          * restore kernel might have created tunnels of their own. Since
2947          * we cannot be sure they are usable for us, find and tear them
2948          * down.
2949          */
2950         tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2951         list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
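                /*
                 * Tearing down a USB3 tunnel here means the link needs
                 * time to settle before our own USB3 tunnel can be
                 * restarted below (the 500 ms appears to be an
                 * empirical value).
                 */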
2952                 if (tb_tunnel_is_usb3(tunnel))
2953                         usb3_delay = 500;
2954                 tb_tunnel_deactivate(tunnel);
2955                 tb_tunnel_free(tunnel);
2956         }
2957
2958         /* Re-create our tunnels now */
2959         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2960                 /* USB3 requires a delay before it can be re-activated */
2961                 if (tb_tunnel_is_usb3(tunnel)) {
2962                         msleep(usb3_delay);
2963                         /* Only need to do it once */
2964                         usb3_delay = 0;
2965                 }
2966                 tb_tunnel_restart(tunnel);
2967         }
2968         if (!list_empty(&tcm->tunnel_list)) {
2969                 /*
2970                  * The tunneled PCIe links need some time to come up
2971                  * again; 100 ms has been enough in practice.
2972                  */
2973                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
2974                 msleep(100);
2975         }
2976         /* Allow tb_handle_hotplug to progress events */
2977         tcm->hotplug_active = true;
2978         tb_dbg(tb, "resume finished\n");
2979
2980         return 0;
2981 }
2982
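/*
 * Remove XDomain connections that went away while the domain was
 * asleep. Returns the number of XDomains removed so the caller can
 * decide whether another rescan is needed.
 */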
2983 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
2984 {
2985         struct tb_port *port;
2986         int ret = 0;
2987
2988         tb_switch_for_each_port(sw, port) {
2989                 if (tb_is_upstream_port(port))
2990                         continue;
2991                 if (port->xdomain && port->xdomain->is_unplugged) {
2992                         tb_retimer_remove_all(port);
2993                         tb_xdomain_remove(port->xdomain);
2994                         tb_port_unconfigure_xdomain(port);
2995                         port->xdomain = NULL;
2996                         ret++;
2997                 } else if (port->remote) {
2998                         ret += tb_free_unplugged_xdomains(port->remote->sw);
2999                 }
3000         }
3001
3002         return ret;
3003 }
3004
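/*
 * During hibernation image creation the hardware is left untouched;
 * freeze/thaw only pause and resume hotplug event handling.
 */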
3005 static int tb_freeze_noirq(struct tb *tb)
3006 {
3007         struct tb_cm *tcm = tb_priv(tb);
3008
3009         tcm->hotplug_active = false;
3010         return 0;
3011 }
3012
3013 static int tb_thaw_noirq(struct tb *tb)
3014 {
3015         struct tb_cm *tcm = tb_priv(tb);
3016
3017         tcm->hotplug_active = true;
3018         return 0;
3019 }
3020
3021 static void tb_complete(struct tb *tb)
3022 {
3023         /*
3024          * Release any unplugged XDomains; if another domain has been
3025          * swapped in place of an unplugged XDomain, we need to run
3026          * another rescan.
3027          */
3028         mutex_lock(&tb->lock);
3029         if (tb_free_unplugged_xdomains(tb->root_switch))
3030                 tb_scan_switch(tb->root_switch);
3031         mutex_unlock(&tb->lock);
3032 }
3033
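/* Runtime suspend the whole topology and stop handling hotplug events */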
3034 static int tb_runtime_suspend(struct tb *tb)
3035 {
3036         struct tb_cm *tcm = tb_priv(tb);
3037
3038         mutex_lock(&tb->lock);
3039         tb_switch_suspend(tb->root_switch, true);
3040         tcm->hotplug_active = false;
3041         mutex_unlock(&tb->lock);
3042
3043         return 0;
3044 }
3045
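/*
 * Scheduled from tb_runtime_resume() to remove routers and XDomains
 * that were unplugged while the domain was runtime suspended.
 */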
3046 static void tb_remove_work(struct work_struct *work)
3047 {
3048         struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
3049         struct tb *tb = tcm_to_tb(tcm);
3050
3051         mutex_lock(&tb->lock);
3052         if (tb->root_switch) {
3053                 tb_free_unplugged_children(tb->root_switch);
3054                 tb_free_unplugged_xdomains(tb->root_switch);
3055         }
3056         mutex_unlock(&tb->lock);
3057 }
3058
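/*
 * Runtime resume the topology, restore the child configuration and
 * restart the tunnels we created. Cleanup of unplugged devices is
 * deferred to tb_remove_work().
 */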
3059 static int tb_runtime_resume(struct tb *tb)
3060 {
3061         struct tb_cm *tcm = tb_priv(tb);
3062         struct tb_tunnel *tunnel, *n;
3063
3064         mutex_lock(&tb->lock);
3065         tb_switch_resume(tb->root_switch);
3066         tb_free_invalid_tunnels(tb);
3067         tb_restore_children(tb->root_switch);
3068         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
3069                 tb_tunnel_restart(tunnel);
3070         tcm->hotplug_active = true;
3071         mutex_unlock(&tb->lock);
3072
3073         /*
3074          * Schedule cleanup of any unplugged devices. Run this in a
3075          * separate thread to avoid possible deadlock if the device
3076          * removal runtime resumes the unplugged device.
3077          */
3078         queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
3079         return 0;
3080 }
3081
3082 static const struct tb_cm_ops tb_cm_ops = {
3083         .start = tb_start,
3084         .stop = tb_stop,
3085         .deinit = tb_deinit,
3086         .suspend_noirq = tb_suspend_noirq,
3087         .resume_noirq = tb_resume_noirq,
3088         .freeze_noirq = tb_freeze_noirq,
3089         .thaw_noirq = tb_thaw_noirq,
3090         .complete = tb_complete,
3091         .runtime_suspend = tb_runtime_suspend,
3092         .runtime_resume = tb_runtime_resume,
3093         .handle_event = tb_handle_event,
3094         .disapprove_switch = tb_disconnect_pci,
3095         .approve_switch = tb_tunnel_pci,
3096         .approve_xdomain_paths = tb_approve_xdomain_paths,
3097         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
3098 };
3099
3100 /*
3101  * During suspend the Thunderbolt controller is reset and all PCIe
3102  * tunnels are lost. The NHI driver will try to reestablish all tunnels
3103  * during resume. This adds device links between the tunneled PCIe
3104  * downstream ports and the NHI so that the device core makes sure
3105  * the NHI is resumed before the rest.
3106  */
3107 static bool tb_apple_add_links(struct tb_nhi *nhi)
3108 {
3109         struct pci_dev *upstream, *pdev;
3110         bool ret;
3111
3112         if (!x86_apple_machine)
3113                 return false;
3114
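        /* Only these discrete Thunderbolt controllers need the links */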
3115         switch (nhi->pdev->device) {
3116         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
3117         case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
3118         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
3119         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
3120                 break;
3121         default:
3122                 return false;
3123         }
3124
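        /*
         * Walk up the PCIe hierarchy until we reach the upstream port
         * of the Thunderbolt controller itself.
         */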
3125         upstream = pci_upstream_bridge(nhi->pdev);
3126         while (upstream) {
3127                 if (!pci_is_pcie(upstream))
3128                         return false;
3129                 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
3130                         break;
3131                 upstream = pci_upstream_bridge(upstream);
3132         }
3133
3134         if (!upstream)
3135                 return false;
3136
3137         /*
3138          * For each hotplug-capable downstream port, add a device link
3139          * back to the NHI so that PCIe tunnels can be re-established after
3140          * sleep.
3141          */
3142         ret = false;
3143         for_each_pci_bridge(pdev, upstream->subordinate) {
3144                 const struct device_link *link;
3145
3146                 if (!pci_is_pcie(pdev))
3147                         continue;
3148                 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
3149                     !pdev->is_hotplug_bridge)
3150                         continue;
3151
3152                 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
3153                                        DL_FLAG_AUTOREMOVE_SUPPLIER |
3154                                        DL_FLAG_PM_RUNTIME);
3155                 if (link) {
3156                         dev_dbg(&nhi->pdev->dev, "created link from %s\n",
3157                                 dev_name(&pdev->dev));
3158                         ret = true;
3159                 } else {
3160                         dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
3161                                  dev_name(&pdev->dev));
3162                 }
3163         }
3164
3165         return ret;
3166 }
3167
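/*
 * tb_probe() - Create a domain using the software connection manager
 *
 * Allocates the domain for @nhi, selects the security level based on
 * whether ACPI allows PCIe tunneling, and initializes the connection
 * manager state (tunnel list, DP resources and bandwidth groups).
 */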
3168 struct tb *tb_probe(struct tb_nhi *nhi)
3169 {
3170         struct tb_cm *tcm;
3171         struct tb *tb;
3172
3173         tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
3174         if (!tb)
3175                 return NULL;
3176
3177         if (tb_acpi_may_tunnel_pcie())
3178                 tb->security_level = TB_SECURITY_USER;
3179         else
3180                 tb->security_level = TB_SECURITY_NOPCIE;
3181
3182         tb->cm_ops = &tb_cm_ops;
3183
3184         tcm = tb_priv(tb);
3185         INIT_LIST_HEAD(&tcm->tunnel_list);
3186         INIT_LIST_HEAD(&tcm->dp_resources);
3187         INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
3188         tb_init_bandwidth_groups(tcm);
3189
3190         tb_dbg(tb, "using software connection manager\n");
3191
3192         /*
3193          * Device links are needed to make sure we establish tunnels
3194          * before the PCIe/USB stack is resumed, so complain here if we
3195          * find them missing.
3196          */
3197         if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
3198                 tb_warn(tb, "device links to tunneled native ports are missing!\n");
3199
3200         return tb;
3201 }