drivers/thunderbolt/tb.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
d6cc51cd 2/*
99cabbb0 3 * Thunderbolt driver - bus logic (NHI independent)
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
99cabbb0 6 * Copyright (C) 2019, Intel Corporation
7 */
8
9#include <linux/slab.h>
10#include <linux/errno.h>
11#include <linux/delay.h>
6ac6faee 12#include <linux/pm_runtime.h>
349bfe08 13#include <linux/platform_data/x86/apple.h>
14
15#include "tb.h"
7adf6097 16#include "tb_regs.h"
1752b9f7 17#include "tunnel.h"
d6cc51cd 18
19#define TB_TIMEOUT 100 /* ms */
20#define MAX_GROUPS 7 /* max Group_ID is 7 */
7f0a34d7 21
22/**
23 * struct tb_cm - Simple Thunderbolt connection manager
24 * @tunnel_list: List of active tunnels
8afe909b 25 * @dp_resources: List of available DP resources for DP tunneling
26 * @hotplug_active: tb_handle_hotplug will stop processing plug
27 * events and exit if this is not set (it needs to
28 * acquire the lock one more time). Used to drain wq
29 * after cfg has been paused.
30 * @remove_work: Work used to remove any unplugged routers after
31 * runtime resume
6ce35635 32 * @groups: Bandwidth groups used in this domain.
33 */
34struct tb_cm {
35 struct list_head tunnel_list;
8afe909b 36 struct list_head dp_resources;
9d3cce0b 37 bool hotplug_active;
6ac6faee 38 struct delayed_work remove_work;
6ce35635 39 struct tb_bandwidth_group groups[MAX_GROUPS];
9d3cce0b 40};
9da672a4 41
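/*
 * Note (added for clarity, an assumption about the allocation layout):
 * the connection-manager private data returned by tb_priv() is placed
 * immediately after struct tb in the same allocation, which is why the
 * back-mapping below can simply subtract sizeof(struct tb).
 */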
42static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
43{
44 return ((void *)tcm - sizeof(struct tb));
45}
46
47struct tb_hotplug_event {
48 struct work_struct work;
49 struct tb *tb;
50 u64 route;
51 u8 port;
52 bool unplug;
53};
54
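/*
 * Group indices are 1-based (1..MAX_GROUPS) so that an index can be
 * matched directly against the Group_ID reported by a DP IN adapter
 * (see tb_discover_bandwidth_group()).
 */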
55static void tb_init_bandwidth_groups(struct tb_cm *tcm)
56{
57 int i;
58
59 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
60 struct tb_bandwidth_group *group = &tcm->groups[i];
61
62 group->tb = tcm_to_tb(tcm);
63 group->index = i + 1;
64 INIT_LIST_HEAD(&group->ports);
65 }
66}
67
68static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
69 struct tb_port *in)
70{
71 if (!group || WARN_ON(in->group))
72 return;
73
74 in->group = group;
75 list_add_tail(&in->group_list, &group->ports);
76
77 tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
78}
79
80static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
81{
82 int i;
83
84 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
85 struct tb_bandwidth_group *group = &tcm->groups[i];
86
87 if (list_empty(&group->ports))
88 return group;
89 }
90
91 return NULL;
92}
93
94static struct tb_bandwidth_group *
95tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
96 struct tb_port *out)
97{
98 struct tb_bandwidth_group *group;
99 struct tb_tunnel *tunnel;
100
101 /*
102 * Find all DP tunnels that go through all the same USB4 links
103 * as this one. Because we always setup tunnels the same way we
104 * can just check for the routers at both ends of the tunnels
105 * and if they are the same we have a match.
106 */
107 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
108 if (!tb_tunnel_is_dp(tunnel))
109 continue;
110
111 if (tunnel->src_port->sw == in->sw &&
112 tunnel->dst_port->sw == out->sw) {
113 group = tunnel->src_port->group;
114 if (group) {
115 tb_bandwidth_group_attach_port(group, in);
116 return group;
117 }
118 }
119 }
120
121 /* Pick up next available group then */
122 group = tb_find_free_bandwidth_group(tcm);
123 if (group)
124 tb_bandwidth_group_attach_port(group, in);
125 else
126 tb_port_warn(in, "no available bandwidth groups\n");
127
128 return group;
129}
130
131static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
132 struct tb_port *out)
133{
134 if (usb4_dp_port_bw_mode_enabled(in)) {
135 int index, i;
136
137 index = usb4_dp_port_group_id(in);
138 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
139 if (tcm->groups[i].index == index) {
140 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
141 return;
142 }
143 }
144 }
145
146 tb_attach_bandwidth_group(tcm, in, out);
147}
148
149static void tb_detach_bandwidth_group(struct tb_port *in)
150{
151 struct tb_bandwidth_group *group = in->group;
152
153 if (group) {
154 in->group = NULL;
155 list_del_init(&in->group_list);
156
157 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
158 }
159}
160
161static void tb_handle_hotplug(struct work_struct *work);
162
163static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
164{
165 struct tb_hotplug_event *ev;
166
167 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
168 if (!ev)
169 return;
170
171 ev->tb = tb;
172 ev->route = route;
173 ev->port = port;
174 ev->unplug = unplug;
175 INIT_WORK(&ev->work, tb_handle_hotplug);
176 queue_work(tb->wq, &ev->work);
177}
178
179/* enumeration & hot plug handling */
180
181static void tb_add_dp_resources(struct tb_switch *sw)
182{
183 struct tb_cm *tcm = tb_priv(sw->tb);
184 struct tb_port *port;
185
186 tb_switch_for_each_port(sw, port) {
187 if (!tb_port_is_dpin(port))
188 continue;
189
190 if (!tb_switch_query_dp_resource(sw, port))
191 continue;
192
193 list_add_tail(&port->list, &tcm->dp_resources);
194 tb_port_dbg(port, "DP IN resource available\n");
195 }
196}
197
198static void tb_remove_dp_resources(struct tb_switch *sw)
199{
200 struct tb_cm *tcm = tb_priv(sw->tb);
201 struct tb_port *port, *tmp;
202
203 /* Clear children resources first */
204 tb_switch_for_each_port(sw, port) {
205 if (tb_port_has_remote(port))
206 tb_remove_dp_resources(port->remote->sw);
207 }
208
209 list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
210 if (port->sw == sw) {
211 tb_port_dbg(port, "DP OUT resource unavailable\n");
212 list_del_init(&port->list);
213 }
214 }
215}
216
217static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
218{
219 struct tb_cm *tcm = tb_priv(tb);
220 struct tb_port *p;
221
222 list_for_each_entry(p, &tcm->dp_resources, list) {
223 if (p == port)
224 return;
225 }
226
227 tb_port_dbg(port, "DP %s resource available discovered\n",
228 tb_port_is_dpin(port) ? "IN" : "OUT");
229 list_add_tail(&port->list, &tcm->dp_resources);
230}
231
232static void tb_discover_dp_resources(struct tb *tb)
233{
234 struct tb_cm *tcm = tb_priv(tb);
235 struct tb_tunnel *tunnel;
236
237 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
238 if (tb_tunnel_is_dp(tunnel))
239 tb_discover_dp_resource(tb, tunnel->dst_port);
240 }
241}
242
243static void tb_switch_discover_tunnels(struct tb_switch *sw,
244 struct list_head *list,
245 bool alloc_hopids)
246{
247 struct tb *tb = sw->tb;
0414bec5 248 struct tb_port *port;
0414bec5 249
b433d010 250 tb_switch_for_each_port(sw, port) {
251 struct tb_tunnel *tunnel = NULL;
252
0414bec5 253 switch (port->config.type) {
4f807e47 254 case TB_TYPE_DP_HDMI_IN:
43bddb26 255 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
256 /*
257 * If a DP tunnel exists, change the TMU mode of the host
258 * router's first-level children to HiFi so that CL0s can work.
259 */
260 if (tunnel)
261 tb_switch_enable_tmu_1st_child(tb->root_switch,
262 TB_SWITCH_TMU_RATE_HIFI);
263 break;
264
0414bec5 265 case TB_TYPE_PCIE_DOWN:
43bddb26 266 tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
267 break;
268
e6f81858 269 case TB_TYPE_USB3_DOWN:
43bddb26 270 tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
271 break;
272
273 default:
274 break;
275 }
276
277 if (tunnel)
278 list_add_tail(&tunnel->list, list);
279 }
4f807e47 280
281 tb_switch_for_each_port(sw, port) {
282 if (tb_port_has_remote(port)) {
283 tb_switch_discover_tunnels(port->remote->sw, list,
284 alloc_hopids);
285 }
286 }
287}
288
289static void tb_discover_tunnels(struct tb *tb)
290{
291 struct tb_cm *tcm = tb_priv(tb);
292 struct tb_tunnel *tunnel;
293
294 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
295
296 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
4f807e47 297 if (tb_tunnel_is_pci(tunnel)) {
298 struct tb_switch *parent = tunnel->dst_port->sw;
299
300 while (parent != tunnel->src_port->sw) {
301 parent->boot = true;
302 parent = tb_switch_parent(parent);
303 }
c94732bd 304 } else if (tb_tunnel_is_dp(tunnel)) {
305 struct tb_port *in = tunnel->src_port;
306 struct tb_port *out = tunnel->dst_port;
307
c94732bd 308 /* Keep the domain from powering down */
309 pm_runtime_get_sync(&in->sw->dev);
310 pm_runtime_get_sync(&out->sw->dev);
311
312 tb_discover_bandwidth_group(tcm, in, out);
0414bec5 313 }
314 }
315}
9da672a4 316
f9cad07b 317static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
318{
319 if (tb_switch_is_usb4(port->sw))
f9cad07b 320 return usb4_port_configure_xdomain(port, xd);
321 return tb_lc_configure_xdomain(port);
322}
323
324static void tb_port_unconfigure_xdomain(struct tb_port *port)
325{
326 if (tb_switch_is_usb4(port->sw))
327 usb4_port_unconfigure_xdomain(port);
328 else
329 tb_lc_unconfigure_xdomain(port);
330
331 tb_port_enable(port->dual_link_port);
332}
333
334static void tb_scan_xdomain(struct tb_port *port)
335{
336 struct tb_switch *sw = port->sw;
337 struct tb *tb = sw->tb;
338 struct tb_xdomain *xd;
339 u64 route;
340
341 if (!tb_is_xdomain_enabled())
342 return;
343
344 route = tb_downstream_route(port);
345 xd = tb_xdomain_find_by_route(tb, route);
346 if (xd) {
347 tb_xdomain_put(xd);
348 return;
349 }
350
351 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
352 NULL);
353 if (xd) {
354 tb_port_at(route, sw)->xdomain = xd;
f9cad07b 355 tb_port_configure_xdomain(port, xd);
356 tb_xdomain_add(xd);
357 }
358}
359
360static int tb_enable_tmu(struct tb_switch *sw)
361{
362 int ret;
363
364 /* If it is already enabled in correct mode, don't touch it */
b017a46d 365 if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
366 return 0;
367
368 ret = tb_switch_tmu_disable(sw);
369 if (ret)
370 return ret;
371
372 ret = tb_switch_tmu_post_time(sw);
373 if (ret)
374 return ret;
375
376 return tb_switch_tmu_enable(sw);
377}
378
379/**
380 * tb_find_unused_port() - return the first inactive port on @sw
381 * @sw: Switch to find the port on
382 * @type: Port type to look for
383 */
384static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
385 enum tb_port_type type)
386{
387 struct tb_port *port;
388
389 tb_switch_for_each_port(sw, port) {
390 if (tb_is_upstream_port(port))
391 continue;
392 if (port->config.type != type)
393 continue;
394 if (!port->cap_adap)
395 continue;
396 if (tb_port_is_enabled(port))
397 continue;
398 return port;
399 }
400 return NULL;
401}
402
403static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
77cfa40f 404 const struct tb_port *port)
405{
406 struct tb_port *down;
407
408 down = usb4_switch_map_usb3_down(sw, port);
77cfa40f 409 if (down && !tb_usb3_port_is_enabled(down))
e6f81858 410 return down;
77cfa40f 411 return NULL;
412}
413
414static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
415 struct tb_port *src_port,
416 struct tb_port *dst_port)
417{
418 struct tb_cm *tcm = tb_priv(tb);
419 struct tb_tunnel *tunnel;
420
421 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
422 if (tunnel->type == type &&
423 ((src_port && src_port == tunnel->src_port) ||
424 (dst_port && dst_port == tunnel->dst_port))) {
425 return tunnel;
426 }
427 }
428
429 return NULL;
430}
431
432static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
433 struct tb_port *src_port,
434 struct tb_port *dst_port)
435{
436 struct tb_port *port, *usb3_down;
437 struct tb_switch *sw;
438
439 /* Pick the router that is deepest in the topology */
440 if (dst_port->sw->config.depth > src_port->sw->config.depth)
441 sw = dst_port->sw;
442 else
443 sw = src_port->sw;
444
445 /* Can't be the host router */
446 if (sw == tb->root_switch)
447 return NULL;
448
449 /* Find the downstream USB4 port that leads to this router */
450 port = tb_port_at(tb_route(sw), tb->root_switch);
451 /* Find the corresponding host router USB3 downstream port */
452 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
453 if (!usb3_down)
454 return NULL;
455
456 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
457}
458
459static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
460 struct tb_port *dst_port, int *available_up, int *available_down)
461{
462 int usb3_consumed_up, usb3_consumed_down, ret;
463 struct tb_cm *tcm = tb_priv(tb);
464 struct tb_tunnel *tunnel;
465 struct tb_port *port;
466
467 tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
468 tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
469 dst_port->port);
470
471 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
472 if (tunnel && tunnel->src_port != src_port &&
473 tunnel->dst_port != dst_port) {
474 ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
475 &usb3_consumed_down);
476 if (ret)
477 return ret;
478 } else {
479 usb3_consumed_up = 0;
480 usb3_consumed_down = 0;
481 }
482
483 *available_up = *available_down = 40000;
484
485 /* Find the minimum available bandwidth over all links */
486 tb_for_each_port_on_path(src_port, dst_port, port) {
487 int link_speed, link_width, up_bw, down_bw;
488
489 if (!tb_port_is_null(port))
490 continue;
491
492 if (tb_is_upstream_port(port)) {
493 link_speed = port->sw->link_speed;
494 } else {
495 link_speed = tb_port_get_link_speed(port);
496 if (link_speed < 0)
497 return link_speed;
498 }
499
500 link_width = port->bonded ? 2 : 1;
501
502 up_bw = link_speed * link_width * 1000; /* Mb/s */
503 /* Leave 10% guard band */
504 up_bw -= up_bw / 10;
505 down_bw = up_bw;
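	/*
	 * Illustrative example of the calculation above: a bonded
	 * 20 Gb/s link gives 20 * 2 * 1000 = 40000 Mb/s, and about
	 * 36000 Mb/s once the 10% guard band has been subtracted.
	 */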
506
507 tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
508 down_bw);
509
510 /*
511 * Find all DP tunnels that cross the port and reduce
512 * their consumed bandwidth from the available.
513 */
514 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
515 int dp_consumed_up, dp_consumed_down;
516
517 if (tb_tunnel_is_invalid(tunnel))
518 continue;
519
520 if (!tb_tunnel_is_dp(tunnel))
521 continue;
522
523 if (!tb_tunnel_port_on_path(tunnel, port))
524 continue;
525
526 /*
527 * Ignore the DP tunnel between src_port and
528 * dst_port because it is the same tunnel and we
529 * may be re-calculating estimated bandwidth.
530 */
531 if (tunnel->src_port == src_port &&
532 tunnel->dst_port == dst_port)
533 continue;
534
535 ret = tb_tunnel_consumed_bandwidth(tunnel,
536 &dp_consumed_up,
537 &dp_consumed_down);
538 if (ret)
539 return ret;
540
541 up_bw -= dp_consumed_up;
542 down_bw -= dp_consumed_down;
543 }
544
545 /*
546 * If USB3 is tunneled from the host router down to the
547 * branch leading to port we need to take USB3 consumed
548 * bandwidth into account regardless of whether it actually
549 * crosses the port.
550 */
551 up_bw -= usb3_consumed_up;
552 down_bw -= usb3_consumed_down;
553
554 if (up_bw < *available_up)
555 *available_up = up_bw;
556 if (down_bw < *available_down)
557 *available_down = down_bw;
558 }
559
560 if (*available_up < 0)
561 *available_up = 0;
562 if (*available_down < 0)
563 *available_down = 0;
564
565 return 0;
566}
567
568static int tb_release_unused_usb3_bandwidth(struct tb *tb,
569 struct tb_port *src_port,
570 struct tb_port *dst_port)
571{
572 struct tb_tunnel *tunnel;
573
574 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
575 return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
576}
577
578static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
579 struct tb_port *dst_port)
580{
581 int ret, available_up, available_down;
582 struct tb_tunnel *tunnel;
583
584 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
585 if (!tunnel)
586 return;
587
588 tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
589
590 /*
591 * Calculate available bandwidth for the first hop USB3 tunnel.
592 * That determines the whole USB3 bandwidth for this branch.
593 */
594 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
595 &available_up, &available_down);
596 if (ret) {
597 tb_warn(tb, "failed to calculate available bandwidth\n");
598 return;
599 }
600
601 tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
602 available_up, available_down);
603
604 tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
605}
606
607static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
608{
609 struct tb_switch *parent = tb_switch_parent(sw);
0bd680cd 610 int ret, available_up, available_down;
611 struct tb_port *up, *down, *port;
612 struct tb_cm *tcm = tb_priv(tb);
613 struct tb_tunnel *tunnel;
614
615 if (!tb_acpi_may_tunnel_usb3()) {
616 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
617 return 0;
618 }
619
620 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
621 if (!up)
622 return 0;
623
624 if (!sw->link_usb4)
625 return 0;
626
627 /*
628 * Look up an available down port. Since we are chaining, it should
629 * be found right above this switch.
630 */
631 port = tb_port_at(tb_route(sw), parent);
632 down = tb_find_usb3_down(parent, port);
633 if (!down)
634 return 0;
635
636 if (tb_route(parent)) {
637 struct tb_port *parent_up;
638 /*
639 * Check first that the parent switch has its upstream USB3
640 * port enabled. Otherwise the chain is not complete and
641 * there is no point setting up a new tunnel.
642 */
643 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
644 if (!parent_up || !tb_port_is_enabled(parent_up))
645 return 0;
646
647 /* Make all unused bandwidth available for the new tunnel */
648 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
649 if (ret)
650 return ret;
651 }
652
653 ret = tb_available_bandwidth(tb, down, up, &available_up,
654 &available_down);
655 if (ret)
656 goto err_reclaim;
657
658 tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
659 available_up, available_down);
660
661 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
662 available_down);
663 if (!tunnel) {
664 ret = -ENOMEM;
665 goto err_reclaim;
666 }
667
668 if (tb_tunnel_activate(tunnel)) {
669 tb_port_info(up,
670 "USB3 tunnel activation failed, aborting\n");
671 ret = -EIO;
672 goto err_free;
673 }
674
675 list_add_tail(&tunnel->list, &tcm->tunnel_list);
676 if (tb_route(parent))
677 tb_reclaim_usb3_bandwidth(tb, down, up);
678
e6f81858 679 return 0;
680
681err_free:
682 tb_tunnel_free(tunnel);
683err_reclaim:
684 if (tb_route(parent))
685 tb_reclaim_usb3_bandwidth(tb, down, up);
686
687 return ret;
688}
689
690static int tb_create_usb3_tunnels(struct tb_switch *sw)
691{
692 struct tb_port *port;
693 int ret;
694
695 if (!tb_acpi_may_tunnel_usb3())
696 return 0;
697
698 if (tb_route(sw)) {
699 ret = tb_tunnel_usb3(sw->tb, sw);
700 if (ret)
701 return ret;
702 }
703
704 tb_switch_for_each_port(sw, port) {
705 if (!tb_port_has_remote(port))
706 continue;
707 ret = tb_create_usb3_tunnels(port->remote->sw);
708 if (ret)
709 return ret;
710 }
711
712 return 0;
713}
714
715static void tb_scan_port(struct tb_port *port);
716
877e50b3 717/*
718 * tb_scan_switch() - scan for and initialize downstream switches
719 */
720static void tb_scan_switch(struct tb_switch *sw)
721{
722 struct tb_port *port;
723
724 pm_runtime_get_sync(&sw->dev);
725
726 tb_switch_for_each_port(sw, port)
727 tb_scan_port(port);
728
729 pm_runtime_mark_last_busy(&sw->dev);
730 pm_runtime_put_autosuspend(&sw->dev);
731}
732
877e50b3 733/*
734 * tb_scan_port() - check for and initialize switches below port
735 */
736static void tb_scan_port(struct tb_port *port)
737{
99cabbb0 738 struct tb_cm *tcm = tb_priv(port->sw->tb);
dfe40ca4 739 struct tb_port *upstream_port;
9da672a4 740 struct tb_switch *sw;
990f4b85 741 int ret;
dfe40ca4 742
743 if (tb_is_upstream_port(port))
744 return;
745
746 if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
747 !tb_dp_port_is_enabled(port)) {
748 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
749 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
750 false);
751 return;
752 }
753
754 if (port->config.type != TB_TYPE_PORT)
755 return;
756 if (port->dual_link_port && port->link_nr)
757 return; /*
758 * Downstream switch is reachable through two ports.
759 * Only scan on the primary port (link_nr == 0).
760 */
761
762 if (port->usb4)
763 pm_runtime_get_sync(&port->usb4->dev);
764
9da672a4 765 if (tb_wait_for_port(port, false) <= 0)
23257cfc 766 goto out_rpm_put;
9da672a4 767 if (port->remote) {
7ea4cd6b 768 tb_port_dbg(port, "port already has a remote\n");
23257cfc 769 goto out_rpm_put;
9da672a4 770 }
dacb1287 771
3fb10ea4 772 tb_retimer_scan(port, true);
dacb1287 773
774 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
775 tb_downstream_route(port));
776 if (IS_ERR(sw)) {
777 /*
778 * If there is an error accessing the connected switch
779 * it may be connected to another domain. Also we allow
780 * the other domain to be connected to a max depth switch.
781 */
782 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
783 tb_scan_xdomain(port);
23257cfc 784 goto out_rpm_put;
7ea4cd6b 785 }
786
787 if (tb_switch_configure(sw)) {
788 tb_switch_put(sw);
23257cfc 789 goto out_rpm_put;
790 }
791
792 /*
793 * If there was previously another domain connected remove it
794 * first.
795 */
796 if (port->xdomain) {
797 tb_xdomain_remove(port->xdomain);
284652a4 798 tb_port_unconfigure_xdomain(port);
799 port->xdomain = NULL;
800 }
801
802 /*
803 * Do not send uevents until we have discovered all existing
804 * tunnels and know which switches were authorized already by
805 * the boot firmware.
806 */
807 if (!tcm->hotplug_active)
808 dev_set_uevent_suppress(&sw->dev, true);
f67cf491 809
810 /*
811 * At the moment runtime PM is supported only on Thunderbolt 2
812 * and beyond (devices with an LC).
813 */
814 sw->rpm = sw->generation > 1;
815
816 if (tb_switch_add(sw)) {
817 tb_switch_put(sw);
23257cfc 818 goto out_rpm_put;
819 }
820
821 /* Link the switches using both links if available */
822 upstream_port = tb_upstream_port(sw);
823 port->remote = upstream_port;
824 upstream_port->remote = port;
825 if (port->dual_link_port && upstream_port->dual_link_port) {
826 port->dual_link_port->remote = upstream_port->dual_link_port;
827 upstream_port->dual_link_port->remote = port->dual_link_port;
828 }
829
91c0c120 830 /* Enable lane bonding if supported */
2ca3263a 831 tb_switch_lane_bonding_enable(sw);
832 /* Set the link configured */
833 tb_switch_configure_link(sw);
834 /*
835 * CL0s and CL1 are enabled and supported together.
836 * Silently ignore CLx enabling in case CLx is not supported.
837 */
838 ret = tb_switch_enable_clx(sw, TB_CL1);
990f4b85 839 if (ret && ret != -EOPNOTSUPP)
840 tb_sw_warn(sw, "failed to enable %s on upstream port\n",
841 tb_switch_clx_name(TB_CL1));
8a90e4fa 842
843 if (tb_switch_is_clx_enabled(sw, TB_CL1))
844 /*
845 * To support highest CLx state, we set router's TMU to
846 * Normal-Uni mode.
847 */
848 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
849 else
850 /* If CLx is disabled, configure the router's TMU to HiFi-Bidir mode */
851 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
91c0c120 852
853 if (tb_enable_tmu(sw))
854 tb_sw_warn(sw, "failed to enable TMU\n");
855
dacb1287 856 /* Scan upstream retimers */
3fb10ea4 857 tb_retimer_scan(upstream_port, true);
dacb1287 858
859 /*
860 * Create USB 3.x tunnels only when the switch is plugged into the
861 * domain. This is because we also scan the domain during discovery
862 * and want to discover the existing USB 3.x tunnels before creating
863 * any new ones.
864 */
865 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
866 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
867
e876f34a 868 tb_add_dp_resources(sw);
9da672a4 869 tb_scan_switch(sw);
870
871out_rpm_put:
872 if (port->usb4) {
873 pm_runtime_mark_last_busy(&port->usb4->dev);
874 pm_runtime_put_autosuspend(&port->usb4->dev);
875 }
876}
877
878static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
879{
880 struct tb_port *src_port, *dst_port;
881 struct tb *tb;
882
883 if (!tunnel)
884 return;
885
886 tb_tunnel_deactivate(tunnel);
887 list_del(&tunnel->list);
888
889 tb = tunnel->tb;
890 src_port = tunnel->src_port;
891 dst_port = tunnel->dst_port;
892
893 switch (tunnel->type) {
894 case TB_TUNNEL_DP:
6ce35635 895 tb_detach_bandwidth_group(src_port);
896 /*
897 * In case of DP tunnel make sure the DP IN resource is
898 * deallocated properly.
899 */
900 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
901 /* Now we can allow the domain to runtime suspend again */
902 pm_runtime_mark_last_busy(&dst_port->sw->dev);
903 pm_runtime_put_autosuspend(&dst_port->sw->dev);
904 pm_runtime_mark_last_busy(&src_port->sw->dev);
905 pm_runtime_put_autosuspend(&src_port->sw->dev);
906 fallthrough;
907
908 case TB_TUNNEL_USB3:
909 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
910 break;
8afe909b 911
912 default:
913 /*
914 * PCIe and DMA tunnels do not consume guaranteed
915 * bandwidth.
916 */
917 break;
918 }
919
920 tb_tunnel_free(tunnel);
921}
922
877e50b3 923/*
924 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
925 */
926static void tb_free_invalid_tunnels(struct tb *tb)
927{
9d3cce0b 928 struct tb_cm *tcm = tb_priv(tb);
929 struct tb_tunnel *tunnel;
930 struct tb_tunnel *n;
931
932 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
933 if (tb_tunnel_is_invalid(tunnel))
934 tb_deactivate_and_free_tunnel(tunnel);
935 }
936}
937
877e50b3 938/*
939 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
940 */
941static void tb_free_unplugged_children(struct tb_switch *sw)
942{
b433d010 943 struct tb_port *port;
dfe40ca4 944
b433d010 945 tb_switch_for_each_port(sw, port) {
dfe40ca4 946 if (!tb_port_has_remote(port))
23dd5bb4 947 continue;
dfe40ca4 948
23dd5bb4 949 if (port->remote->sw->is_unplugged) {
dacb1287 950 tb_retimer_remove_all(port);
8afe909b 951 tb_remove_dp_resources(port->remote->sw);
de462039 952 tb_switch_unconfigure_link(port->remote->sw);
91c0c120 953 tb_switch_lane_bonding_disable(port->remote->sw);
bfe778ac 954 tb_switch_remove(port->remote->sw);
23dd5bb4 955 port->remote = NULL;
956 if (port->dual_link_port)
957 port->dual_link_port->remote = NULL;
958 } else {
959 tb_free_unplugged_children(port->remote->sw);
960 }
961 }
962}
963
964static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
965 const struct tb_port *port)
3364f0c1 966{
967 struct tb_port *down = NULL;
968
969 /*
970 * To keep plugging devices consistently in the same PCIe
b0407983 971 * hierarchy, do mapping here for switch downstream PCIe ports.
99cabbb0 972 */
973 if (tb_switch_is_usb4(sw)) {
974 down = usb4_switch_map_pcie_down(sw, port);
975 } else if (!tb_route(sw)) {
976 int phy_port = tb_phy_port_from_link(port->port);
977 int index;
9d3cce0b 978
979 /*
980 * Hard-coded Thunderbolt port to PCIe down port mapping
981 * per controller.
982 */
983 if (tb_switch_is_cactus_ridge(sw) ||
984 tb_switch_is_alpine_ridge(sw))
99cabbb0 985 index = !phy_port ? 6 : 7;
17a8f815 986 else if (tb_switch_is_falcon_ridge(sw))
99cabbb0 987 index = !phy_port ? 6 : 8;
988 else if (tb_switch_is_titan_ridge(sw))
989 index = !phy_port ? 8 : 9;
990 else
991 goto out;
992
993 /* Validate the hard-coding */
994 if (WARN_ON(index > sw->config.max_port_number))
995 goto out;
996
997 down = &sw->ports[index];
998 }
999
1000 if (down) {
1001 if (WARN_ON(!tb_port_is_pcie_down(down)))
99cabbb0 1002 goto out;
9cac51a0 1003 if (tb_pci_port_is_enabled(down))
1004 goto out;
1005
b0407983 1006 return down;
99cabbb0 1007 }
3364f0c1 1008
99cabbb0 1009out:
e78db6f0 1010 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
99cabbb0 1011}
3364f0c1 1012
1013static void
1014tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1015{
1016 struct tb_tunnel *first_tunnel;
1017 struct tb *tb = group->tb;
1018 struct tb_port *in;
1019 int ret;
1020
1021 tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1022 group->index);
1023
1024 first_tunnel = NULL;
1025 list_for_each_entry(in, &group->ports, group_list) {
1026 int estimated_bw, estimated_up, estimated_down;
1027 struct tb_tunnel *tunnel;
1028 struct tb_port *out;
1029
1030 if (!usb4_dp_port_bw_mode_enabled(in))
1031 continue;
1032
1033 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1034 if (WARN_ON(!tunnel))
1035 break;
1036
1037 if (!first_tunnel) {
1038 /*
1039 * Since USB3 bandwidth is shared by all DP
1040 * tunnels under the host router USB4 port, even
1041 * if they do not begin from the host router, we
1042 * can release USB3 bandwidth just once and not
1043 * for each tunnel separately.
1044 */
1045 first_tunnel = tunnel;
1046 ret = tb_release_unused_usb3_bandwidth(tb,
1047 first_tunnel->src_port, first_tunnel->dst_port);
1048 if (ret) {
1049 tb_port_warn(in,
1050 "failed to release unused bandwidth\n");
1051 break;
1052 }
1053 }
1054
1055 out = tunnel->dst_port;
1056 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1057 &estimated_down);
1058 if (ret) {
1059 tb_port_warn(in,
1060 "failed to re-calculate estimated bandwidth\n");
1061 break;
1062 }
1063
1064 /*
1065 * Estimated bandwidth includes:
1066 * - already allocated bandwidth for the DP tunnel
1067 * - available bandwidth along the path
1068 * - bandwidth allocated for USB 3.x but not used.
1069 */
1070 tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
1071 estimated_up, estimated_down);
1072
1073 if (in->sw->config.depth < out->sw->config.depth)
1074 estimated_bw = estimated_down;
1075 else
1076 estimated_bw = estimated_up;
1077
1078 if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
1079 tb_port_warn(in, "failed to update estimated bandwidth\n");
1080 }
1081
1082 if (first_tunnel)
1083 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1084 first_tunnel->dst_port);
1085
1086 tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1087}
1088
1089static void tb_recalc_estimated_bandwidth(struct tb *tb)
1090{
1091 struct tb_cm *tcm = tb_priv(tb);
1092 int i;
1093
1094 tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1095
1096 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1097 struct tb_bandwidth_group *group = &tcm->groups[i];
1098
1099 if (!list_empty(&group->ports))
1100 tb_recalc_estimated_bandwidth_for_group(group);
1101 }
1102
1103 tb_dbg(tb, "bandwidth re-calculation done\n");
1104}
1105
1106static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1107{
1108 struct tb_port *host_port, *port;
1109 struct tb_cm *tcm = tb_priv(tb);
1110
1111 host_port = tb_route(in->sw) ?
1112 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1113
1114 list_for_each_entry(port, &tcm->dp_resources, list) {
1115 if (!tb_port_is_dpout(port))
1116 continue;
1117
1118 if (tb_port_is_enabled(port)) {
b0ef48fc 1119 tb_port_dbg(port, "DP OUT in use\n");
1120 continue;
1121 }
1122
1123 tb_port_dbg(port, "DP OUT available\n");
1124
1125 /*
1126 * Keep the DP tunnel under the topology starting from
1127 * the same host router downstream port.
1128 */
1129 if (host_port && tb_route(port->sw)) {
1130 struct tb_port *p;
1131
1132 p = tb_port_at(tb_route(port->sw), tb->root_switch);
1133 if (p != host_port)
1134 continue;
1135 }
1136
1137 return port;
1138 }
1139
1140 return NULL;
1141}
1142
8afe909b 1143static void tb_tunnel_dp(struct tb *tb)
4f807e47 1144{
9d2d0a5c 1145 int available_up, available_down, ret, link_nr;
4f807e47 1146 struct tb_cm *tcm = tb_priv(tb);
8afe909b 1147 struct tb_port *port, *in, *out;
4f807e47 1148 struct tb_tunnel *tunnel;
4f807e47 1149
1150 if (!tb_acpi_may_tunnel_dp()) {
1151 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1152 return;
1153 }
1154
1155 /*
1156 * Find pair of inactive DP IN and DP OUT adapters and then
1157 * establish a DP tunnel between them.
1158 */
1159 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1160
1161 in = NULL;
1162 out = NULL;
1163 list_for_each_entry(port, &tcm->dp_resources, list) {
1164 if (!tb_port_is_dpin(port))
1165 continue;
1166
8afe909b 1167 if (tb_port_is_enabled(port)) {
b0ef48fc 1168 tb_port_dbg(port, "DP IN in use\n");
1169 continue;
1170 }
4f807e47 1171
e876f34a 1172 tb_port_dbg(port, "DP IN available\n");
8afe909b 1173
1174 out = tb_find_dp_out(tb, port);
1175 if (out) {
8afe909b 1176 in = port;
1177 break;
1178 }
1179 }
1180
1181 if (!in) {
1182 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1183 return;
1184 }
1185 if (!out) {
1186 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
1187 return;
1188 }
1189
1190 /*
1191 * This is only applicable to links that are not bonded (so
1192 * when Thunderbolt 1 hardware is involved somewhere in the
1193 * topology). For these try to share the DP bandwidth between
1194 * the two lanes.
1195 */
1196 link_nr = 1;
1197 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1198 if (tb_tunnel_is_dp(tunnel)) {
1199 link_nr = 0;
1200 break;
1201 }
1202 }
1203
1204 /*
1205 * DP stream needs the domain to be active so runtime resume
1206 * both ends of the tunnel.
1207 *
1208 * This should bring the routers in the middle active as well
1209 * and keeps the domain from runtime suspending while the DP
1210 * tunnel is active.
1211 */
1212 pm_runtime_get_sync(&in->sw->dev);
1213 pm_runtime_get_sync(&out->sw->dev);
1214
1215 if (tb_switch_alloc_dp_resource(in->sw, in)) {
1216 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
6ac6faee 1217 goto err_rpm_put;
8afe909b 1218 }
4f807e47 1219
1220 if (!tb_attach_bandwidth_group(tcm, in, out))
1221 goto err_dealloc_dp;
1222
1223 /* Make all unused USB3 bandwidth available for the new DP tunnel */
1224 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1225 if (ret) {
1226 tb_warn(tb, "failed to release unused bandwidth\n");
6ce35635 1227 goto err_detach_group;
1228 }
1229
6ce35635 1230 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
0bd680cd 1231 if (ret)
6ce35635 1232 goto err_reclaim_usb;
1233
1234 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1235 available_up, available_down);
a11b88ad 1236
1237 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1238 available_down);
4f807e47 1239 if (!tunnel) {
8afe909b 1240 tb_port_dbg(out, "could not allocate DP tunnel\n");
6ce35635 1241 goto err_reclaim_usb;
1242 }
1243
1244 if (tb_tunnel_activate(tunnel)) {
1245 tb_port_info(out, "DP tunnel activation failed, aborting\n");
0bd680cd 1246 goto err_free;
1247 }
1248
1249 list_add_tail(&tunnel->list, &tcm->tunnel_list);
0bd680cd 1250 tb_reclaim_usb3_bandwidth(tb, in, out);
1251
1252 /* Update the domain with the new bandwidth estimation */
1253 tb_recalc_estimated_bandwidth(tb);
1254
1255 /*
1256 * If a DP tunnel exists, change the TMU mode of the host router's
1257 * first-level children to HiFi so that CL0s can work.
1258 */
1259 tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);
1260
1261 return;
1262
1263err_free:
1264 tb_tunnel_free(tunnel);
6ce35635 1265err_reclaim_usb:
0bd680cd 1266 tb_reclaim_usb3_bandwidth(tb, in, out);
1267err_detach_group:
1268 tb_detach_bandwidth_group(in);
0bd680cd 1269err_dealloc_dp:
8afe909b 1270 tb_switch_dealloc_dp_resource(in->sw, in);
1271err_rpm_put:
1272 pm_runtime_mark_last_busy(&out->sw->dev);
1273 pm_runtime_put_autosuspend(&out->sw->dev);
1274 pm_runtime_mark_last_busy(&in->sw->dev);
1275 pm_runtime_put_autosuspend(&in->sw->dev);
1276}
1277
8afe909b 1278static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
4f807e47 1279{
1280 struct tb_port *in, *out;
1281 struct tb_tunnel *tunnel;
1282
1283 if (tb_port_is_dpin(port)) {
1284 tb_port_dbg(port, "DP IN resource unavailable\n");
1285 in = port;
1286 out = NULL;
1287 } else {
1288 tb_port_dbg(port, "DP OUT resource unavailable\n");
1289 in = NULL;
1290 out = port;
1291 }
1292
1293 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1294 tb_deactivate_and_free_tunnel(tunnel);
1295 list_del_init(&port->list);
1296
1297 /*
1298 * See if there is another DP OUT port that can be used to
1299 * create another tunnel.
1300 */
6ce35635 1301 tb_recalc_estimated_bandwidth(tb);
1302 tb_tunnel_dp(tb);
1303}
1304
1305static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1306{
1307 struct tb_cm *tcm = tb_priv(tb);
1308 struct tb_port *p;
1309
1310 if (tb_port_is_enabled(port))
1311 return;
1312
1313 list_for_each_entry(p, &tcm->dp_resources, list) {
1314 if (p == port)
1315 return;
1316 }
1317
1318 tb_port_dbg(port, "DP %s resource available\n",
1319 tb_port_is_dpin(port) ? "IN" : "OUT");
1320 list_add_tail(&port->list, &tcm->dp_resources);
1321
1322 /* Look for suitable DP IN <-> DP OUT pairs now */
1323 tb_tunnel_dp(tb);
1324}
1325
1326static void tb_disconnect_and_release_dp(struct tb *tb)
1327{
1328 struct tb_cm *tcm = tb_priv(tb);
1329 struct tb_tunnel *tunnel, *n;
1330
1331 /*
1332 * Tear down all DP tunnels and release their resources. They
1333 * will be re-established after resume based on plug events.
1334 */
1335 list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1336 if (tb_tunnel_is_dp(tunnel))
1337 tb_deactivate_and_free_tunnel(tunnel);
1338 }
1339
1340 while (!list_empty(&tcm->dp_resources)) {
1341 struct tb_port *port;
1342
1343 port = list_first_entry(&tcm->dp_resources,
1344 struct tb_port, list);
1345 list_del_init(&port->list);
1346 }
1347}
1348
1349static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1350{
1351 struct tb_tunnel *tunnel;
1352 struct tb_port *up;
1353
1354 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1355 if (WARN_ON(!up))
1356 return -ENODEV;
1357
1358 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1359 if (WARN_ON(!tunnel))
1360 return -ENODEV;
1361
1362 tb_switch_xhci_disconnect(sw);
1363
1364 tb_tunnel_deactivate(tunnel);
1365 list_del(&tunnel->list);
1366 tb_tunnel_free(tunnel);
1367 return 0;
1368}
1369
1370static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1371{
1372 struct tb_port *up, *down, *port;
1373 struct tb_cm *tcm = tb_priv(tb);
1374 struct tb_switch *parent_sw;
1375 struct tb_tunnel *tunnel;
3364f0c1 1376
386e5e29 1377 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1378 if (!up)
1379 return 0;
3364f0c1 1380
1381 /*
1382 * Look up an available down port. Since we are chaining, it should
1383 * be found right above this switch.
1384 */
1385 parent_sw = tb_to_switch(sw->dev.parent);
1386 port = tb_port_at(tb_route(sw), parent_sw);
1387 down = tb_find_pcie_down(parent_sw, port);
1388 if (!down)
1389 return 0;
1390
1391 tunnel = tb_tunnel_alloc_pci(tb, up, down);
1392 if (!tunnel)
1393 return -ENOMEM;
1394
1395 if (tb_tunnel_activate(tunnel)) {
1396 tb_port_info(up,
1397 "PCIe tunnel activation failed, aborting\n");
1398 tb_tunnel_free(tunnel);
1399 return -EIO;
3364f0c1 1400 }
99cabbb0 1401
1402 /*
1403 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
1404 * here.
1405 */
1406 if (tb_switch_pcie_l1_enable(sw))
1407 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1408
1409 if (tb_switch_xhci_connect(sw))
1410 tb_sw_warn(sw, "failed to connect xHCI\n");
1411
1412 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1413 return 0;
3364f0c1 1414}
9da672a4 1415
1416static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1417 int transmit_path, int transmit_ring,
1418 int receive_path, int receive_ring)
1419{
1420 struct tb_cm *tcm = tb_priv(tb);
1421 struct tb_port *nhi_port, *dst_port;
1422 struct tb_tunnel *tunnel;
1423 struct tb_switch *sw;
1424
1425 sw = tb_to_switch(xd->dev.parent);
1426 dst_port = tb_port_at(xd->route, sw);
386e5e29 1427 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1428
1429 mutex_lock(&tb->lock);
1430 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1431 transmit_ring, receive_path, receive_ring);
1432 if (!tunnel) {
1433 mutex_unlock(&tb->lock);
1434 return -ENOMEM;
1435 }
1436
1437 if (tb_tunnel_activate(tunnel)) {
1438 tb_port_info(nhi_port,
1439 "DMA tunnel activation failed, aborting\n");
1440 tb_tunnel_free(tunnel);
1441 mutex_unlock(&tb->lock);
1442 return -EIO;
1443 }
1444
1445 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1446 mutex_unlock(&tb->lock);
1447 return 0;
1448}
1449
1450static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1451 int transmit_path, int transmit_ring,
1452 int receive_path, int receive_ring)
7ea4cd6b 1453{
1454 struct tb_cm *tcm = tb_priv(tb);
1455 struct tb_port *nhi_port, *dst_port;
1456 struct tb_tunnel *tunnel, *n;
1457 struct tb_switch *sw;
1458
1459 sw = tb_to_switch(xd->dev.parent);
1460 dst_port = tb_port_at(xd->route, sw);
180b0689 1461 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
7ea4cd6b 1462
1463 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1464 if (!tb_tunnel_is_dma(tunnel))
1465 continue;
1466 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1467 continue;
1468
1469 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1470 receive_path, receive_ring))
1471 tb_deactivate_and_free_tunnel(tunnel);
1472 }
1473}
1474
1475static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1476 int transmit_path, int transmit_ring,
1477 int receive_path, int receive_ring)
1478{
1479 if (!xd->is_unplugged) {
1480 mutex_lock(&tb->lock);
1481 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1482 transmit_ring, receive_path,
1483 receive_ring);
1484 mutex_unlock(&tb->lock);
1485 }
1486 return 0;
1487}
1488
1489/* hotplug handling */
1490
877e50b3 1491/*
d6cc51cd
AN
1492 * tb_handle_hotplug() - handle hotplug event
1493 *
1494 * Executes on tb->wq.
1495 */
1496static void tb_handle_hotplug(struct work_struct *work)
1497{
1498 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1499 struct tb *tb = ev->tb;
9d3cce0b 1500 struct tb_cm *tcm = tb_priv(tb);
1501 struct tb_switch *sw;
1502 struct tb_port *port;
284652a4 1503
1504 /* Bring the domain back from sleep if it was suspended */
1505 pm_runtime_get_sync(&tb->dev);
1506
d6cc51cd 1507 mutex_lock(&tb->lock);
9d3cce0b 1508 if (!tcm->hotplug_active)
1509 goto out; /* during init, suspend or shutdown */
1510
8f965efd 1511 sw = tb_switch_find_by_route(tb, ev->route);
1512 if (!sw) {
1513 tb_warn(tb,
1514 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
1515 ev->route, ev->port, ev->unplug);
1516 goto out;
1517 }
1518 if (ev->port > sw->config.max_port_number) {
1519 tb_warn(tb,
1520 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
1521 ev->route, ev->port, ev->unplug);
8f965efd 1522 goto put_sw;
1523 }
1524 port = &sw->ports[ev->port];
1525 if (tb_is_upstream_port(port)) {
1526 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1527 ev->route, ev->port, ev->unplug);
8f965efd 1528 goto put_sw;
053596d9 1529 }
1530
1531 pm_runtime_get_sync(&sw->dev);
1532
053596d9 1533 if (ev->unplug) {
1534 tb_retimer_remove_all(port);
1535
dfe40ca4 1536 if (tb_port_has_remote(port)) {
7ea4cd6b 1537 tb_port_dbg(port, "switch unplugged\n");
aae20bb6 1538 tb_sw_set_unplugged(port->remote->sw);
3364f0c1 1539 tb_free_invalid_tunnels(tb);
8afe909b 1540 tb_remove_dp_resources(port->remote->sw);
cf29b9af 1541 tb_switch_tmu_disable(port->remote->sw);
de462039 1542 tb_switch_unconfigure_link(port->remote->sw);
91c0c120 1543 tb_switch_lane_bonding_disable(port->remote->sw);
bfe778ac 1544 tb_switch_remove(port->remote->sw);
053596d9 1545 port->remote = NULL;
1546 if (port->dual_link_port)
1547 port->dual_link_port->remote = NULL;
8afe909b 1548 /* Maybe we can create another DP tunnel */
6ce35635 1549 tb_recalc_estimated_bandwidth(tb);
8afe909b 1550 tb_tunnel_dp(tb);
1551 } else if (port->xdomain) {
1552 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1553
1554 tb_port_dbg(port, "xdomain unplugged\n");
1555 /*
1556 * Service drivers are unbound during
1557 * tb_xdomain_remove() so setting XDomain as
1558 * unplugged here prevents deadlock if they call
1559 * tb_xdomain_disable_paths(). We will tear down
180b0689 1560 * all the tunnels below.
1561 */
1562 xd->is_unplugged = true;
1563 tb_xdomain_remove(xd);
1564 port->xdomain = NULL;
180b0689 1565 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
7ea4cd6b 1566 tb_xdomain_put(xd);
284652a4 1567 tb_port_unconfigure_xdomain(port);
1568 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1569 tb_dp_resource_unavailable(tb, port);
1570 } else if (!port->port) {
1571 tb_sw_dbg(sw, "xHCI disconnect request\n");
1572 tb_switch_xhci_disconnect(sw);
053596d9 1573 } else {
1574 tb_port_dbg(port,
1575 "got unplug event for disconnected port, ignoring\n");
1576 }
1577 } else if (port->remote) {
62efe699 1578 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1579 } else if (!port->port && sw->authorized) {
1580 tb_sw_dbg(sw, "xHCI connect request\n");
1581 tb_switch_xhci_connect(sw);
053596d9 1582 } else {
344e0643 1583 if (tb_port_is_null(port)) {
62efe699 1584 tb_port_dbg(port, "hotplug: scanning\n");
1585 tb_scan_port(port);
1586 if (!port->remote)
62efe699 1587 tb_port_dbg(port, "hotplug: no switch found\n");
1588 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1589 tb_dp_resource_available(tb, port);
344e0643 1590 }
053596d9 1591 }
8f965efd 1592
1593 pm_runtime_mark_last_busy(&sw->dev);
1594 pm_runtime_put_autosuspend(&sw->dev);
1595
1596put_sw:
1597 tb_switch_put(sw);
1598out:
1599 mutex_unlock(&tb->lock);
1600
1601 pm_runtime_mark_last_busy(&tb->dev);
1602 pm_runtime_put_autosuspend(&tb->dev);
1603
1604 kfree(ev);
1605}
1606
1607static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
1608 int *requested_down)
1609{
1610 int allocated_up, allocated_down, available_up, available_down, ret;
1611 int requested_up_corrected, requested_down_corrected, granularity;
1612 int max_up, max_down, max_up_rounded, max_down_rounded;
1613 struct tb *tb = tunnel->tb;
1614 struct tb_port *in, *out;
1615
1616 ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
1617 if (ret)
1618 return ret;
1619
1620 in = tunnel->src_port;
1621 out = tunnel->dst_port;
1622
1623 tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
1624 allocated_up, allocated_down);
1625
1626 /*
1627 * If we get rounded up request from graphics side, say HBR2 x 4
1628 * that is 17500 instead of 17280 (this is because of the
1629 * granularity), we allow it too. Here the graphics has already
1630 * negotiated with the DPRX the maximum possible rates (which is
1631 * 17280 in this case).
1632 *
1633 * Since the link cannot go higher than 17280 we use that in our
1634 * calculations but the DP IN adapter Allocated BW write must be
1635 * the same value (17500) otherwise the adapter will mark it as
1636 * failed for graphics.
1637 */
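	/*
	 * For illustration (values assumed rather than read from
	 * hardware): HBR2 is 5.4 Gb/s per lane, so four lanes with
	 * 8b/10b encoding carry 17280 Mb/s; with a 250 Mb/s
	 * granularity roundup(17280, 250) = 17500, which is the
	 * value the DP IN adapter expects to see written back.
	 */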
1638 ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
1639 if (ret)
1640 return ret;
1641
1642 ret = usb4_dp_port_granularity(in);
1643 if (ret < 0)
1644 return ret;
1645 granularity = ret;
1646
1647 max_up_rounded = roundup(max_up, granularity);
1648 max_down_rounded = roundup(max_down, granularity);
1649
1650 /*
1651 * This will "fix" the request down to the maximum supported
1652 * rate * lanes if it is at the maximum rounded up level.
1653 */
1654 requested_up_corrected = *requested_up;
1655 if (requested_up_corrected == max_up_rounded)
1656 requested_up_corrected = max_up;
1657 else if (requested_up_corrected < 0)
1658 requested_up_corrected = 0;
1659 requested_down_corrected = *requested_down;
1660 if (requested_down_corrected == max_down_rounded)
1661 requested_down_corrected = max_down;
1662 else if (requested_down_corrected < 0)
1663 requested_down_corrected = 0;
1664
1665 tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
1666 requested_up_corrected, requested_down_corrected);
1667
1668 if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
1669 (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
1670 tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
1671 requested_up_corrected, requested_down_corrected,
1672 max_up_rounded, max_down_rounded);
1673 return -ENOBUFS;
1674 }
1675
1676 if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
1677 (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
1678 /*
1679 * If the requested bandwidth is less than or equal to what is
1680 * currently allocated to that tunnel, we simply change
1681 * the reservation of the tunnel. Since all the tunnels
1682 * going out from the same USB4 port are in the same
1683 * group the released bandwidth will be taken into
1684 * account for the other tunnels automatically below.
1685 */
1686 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
1687 requested_down);
1688 }
1689
1690 /*
1691 * More bandwidth is requested. Release all the potential
1692 * bandwidth from USB3 first.
1693 */
1694 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1695 if (ret)
1696 return ret;
1697
1698 /*
1699 * Then go over all tunnels that cross the same USB4 ports (they
1700 * are also in the same group but we use the same function here
1701 * that we use with the normal bandwidth allocation).
1702 */
1703 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
1704 if (ret)
1705 goto reclaim;
1706
1707 tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
1708 available_up, available_down);
1709
1710 if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
1711 (*requested_down >= 0 && available_down >= requested_down_corrected)) {
1712 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
1713 requested_down);
1714 } else {
1715 ret = -ENOBUFS;
1716 }
1717
1718reclaim:
1719 tb_reclaim_usb3_bandwidth(tb, in, out);
1720 return ret;
1721}
1722
1723static void tb_handle_dp_bandwidth_request(struct work_struct *work)
1724{
1725 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1726 int requested_bw, requested_up, requested_down, ret;
1727 struct tb_port *in, *out;
1728 struct tb_tunnel *tunnel;
1729 struct tb *tb = ev->tb;
1730 struct tb_cm *tcm = tb_priv(tb);
1731 struct tb_switch *sw;
1732
1733 pm_runtime_get_sync(&tb->dev);
1734
1735 mutex_lock(&tb->lock);
1736 if (!tcm->hotplug_active)
1737 goto unlock;
1738
1739 sw = tb_switch_find_by_route(tb, ev->route);
1740 if (!sw) {
1741 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
1742 ev->route);
1743 goto unlock;
1744 }
1745
1746 in = &sw->ports[ev->port];
1747 if (!tb_port_is_dpin(in)) {
1748 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
1749 goto unlock;
1750 }
1751
1752 tb_port_dbg(in, "handling bandwidth allocation request\n");
1753
1754 if (!usb4_dp_port_bw_mode_enabled(in)) {
1755 tb_port_warn(in, "bandwidth allocation mode not enabled\n");
1756 goto unlock;
1757 }
1758
1759 ret = usb4_dp_port_requested_bw(in);
1760 if (ret < 0) {
1761 if (ret == -ENODATA)
1762 tb_port_dbg(in, "no bandwidth request active\n");
1763 else
1764 tb_port_warn(in, "failed to read requested bandwidth\n");
1765 goto unlock;
1766 }
ace75e18 1767 requested_bw = ret;
1768
1769 tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
1770
1771 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1772 if (!tunnel) {
1773 tb_port_warn(in, "failed to find tunnel\n");
1774 goto unlock;
1775 }
1776
1777 out = tunnel->dst_port;
1778
1779 if (in->sw->config.depth < out->sw->config.depth) {
1780 requested_up = -1;
1781 requested_down = requested_bw;
1782 } else {
1783 requested_up = requested_bw;
1784 requested_down = -1;
1785 }
1786
1787 ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
1788 if (ret) {
1789 if (ret == -ENOBUFS)
1790 tb_port_warn(in, "not enough bandwidth available\n");
1791 else
1792 tb_port_warn(in, "failed to change bandwidth allocation\n");
1793 } else {
1794 tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
1795 requested_up, requested_down);
1796
1797 /* Update other clients about the allocation change */
1798 tb_recalc_estimated_bandwidth(tb);
1799 }
1800
1801unlock:
1802 mutex_unlock(&tb->lock);
1803
1804 pm_runtime_mark_last_busy(&tb->dev);
1805 pm_runtime_put_autosuspend(&tb->dev);
1806}
1807
1808static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
1809{
1810 struct tb_hotplug_event *ev;
1811
1812 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
1813 if (!ev)
1814 return;
1815
1816 ev->tb = tb;
1817 ev->route = route;
1818 ev->port = port;
1819 INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
1820 queue_work(tb->wq, &ev->work);
1821}
1822
1823static void tb_handle_notification(struct tb *tb, u64 route,
1824 const struct cfg_error_pkg *error)
1825{
1826 if (tb_cfg_ack_notification(tb->ctl, route, error))
1827 tb_warn(tb, "could not ack notification on %llx\n", route);
1828
1829 switch (error->error) {
1830 case TB_CFG_ERROR_DP_BW:
1831 tb_queue_dp_bandwidth_request(tb, route, error->port);
1832 break;
1833
1834 default:
1835 /* Ack is enough */
1836 return;
1837 }
1838}
1839
877e50b3 1840/*
1841 * tb_schedule_hotplug_handler() - callback function for the control channel
1842 *
1843 * Delegates to tb_handle_hotplug.
1844 */
1845static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1846 const void *buf, size_t size)
d6cc51cd 1847{
81a54b5e 1848 const struct cfg_event_pkg *pkg = buf;
6ce35635 1849 u64 route = tb_cfg_get_route(&pkg->header);
81a54b5e 1850
1851 switch (type) {
1852 case TB_CFG_PKG_ERROR:
1853 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
1854 return;
1855 case TB_CFG_PKG_EVENT:
1856 break;
1857 default:
1858 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
1859 return;
1860 }
1861
210e9f56 1862 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
1863 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
1864 pkg->port);
1865 }
1866
4f807e47 1867 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1868}
1869
9d3cce0b 1870static void tb_stop(struct tb *tb)
d6cc51cd 1871{
9d3cce0b 1872 struct tb_cm *tcm = tb_priv(tb);
93f36ade
MW
1873 struct tb_tunnel *tunnel;
1874 struct tb_tunnel *n;
3364f0c1 1875
6ac6faee 1876 cancel_delayed_work(&tcm->remove_work);
3364f0c1 1877 /* tunnels are only present after everything has been initialized */
7ea4cd6b
MW
1878 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1879 /*
1880 * DMA tunnels require the driver to be functional so we
1881 * tear them down. Other protocol tunnels can be left
1882 * intact.
1883 */
1884 if (tb_tunnel_is_dma(tunnel))
1885 tb_tunnel_deactivate(tunnel);
93f36ade 1886 tb_tunnel_free(tunnel);
7ea4cd6b 1887 }
bfe778ac 1888 tb_switch_remove(tb->root_switch);
9d3cce0b 1889 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
d6cc51cd
AN
1890}
1891
99cabbb0
MW
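/*
 * device_for_each_child() callback: lift uevent suppression from a
 * discovered switch, mark switches set up by the boot firmware as
 * authorized, and recurse into child switches.
 */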
1892static int tb_scan_finalize_switch(struct device *dev, void *data)
1893{
1894 if (tb_is_switch(dev)) {
1895 struct tb_switch *sw = tb_to_switch(dev);
1896
1897 /*
 1898 * If we found that the switch was already set up by the
 1899 * boot firmware, mark it as authorized now before we
 1900 * send the uevent to userspace.
1901 */
1902 if (sw->boot)
1903 sw->authorized = 1;
1904
1905 dev_set_uevent_suppress(dev, false);
1906 kobject_uevent(&dev->kobj, KOBJ_ADD);
1907 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
1908 }
1909
1910 return 0;
1911}
1912
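/*
 * Start the software connection manager: allocate, configure and add the
 * root switch, set up its TMU, scan the existing topology, discover
 * tunnels and DP resources created by the boot firmware, create USB 3.x
 * tunnels if needed and finally enable hotplug event handling.
 */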
9d3cce0b 1913static int tb_start(struct tb *tb)
d6cc51cd 1914{
9d3cce0b 1915 struct tb_cm *tcm = tb_priv(tb);
bfe778ac 1916 int ret;
d6cc51cd 1917
bfe778ac 1918 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
444ac384
MW
1919 if (IS_ERR(tb->root_switch))
1920 return PTR_ERR(tb->root_switch);
a25c8b2f 1921
e6b245cc
MW
1922 /*
 1923 * ICM firmware upgrade needs running firmware, which is not
 1924 * available in native mode, so disable firmware upgrade of the
 1925 * root switch.
5172eb9a
SC
1926 *
1927 * However, USB4 routers support NVM firmware upgrade if they
1928 * implement the necessary router operations.
e6b245cc 1929 */
5172eb9a 1930 tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
6ac6faee
MW
1931 /* All USB4 routers support runtime PM */
1932 tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
e6b245cc 1933
bfe778ac
MW
1934 ret = tb_switch_configure(tb->root_switch);
1935 if (ret) {
1936 tb_switch_put(tb->root_switch);
1937 return ret;
1938 }
1939
1940 /* Announce the switch to the world */
1941 ret = tb_switch_add(tb->root_switch);
1942 if (ret) {
1943 tb_switch_put(tb->root_switch);
1944 return ret;
1945 }
1946
b017a46d
GF
1947 /*
 1948 * To support the highest CLx state, we set the host router's
 1949 * TMU to Normal mode.
1950 */
1951 tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
1952 false);
cf29b9af
RM
1953 /* Enable TMU if it is off */
1954 tb_switch_tmu_enable(tb->root_switch);
9da672a4
AN
1955 /* Full scan to discover devices added before the driver was loaded. */
1956 tb_scan_switch(tb->root_switch);
0414bec5 1957 /* Discover tunnels created by the boot firmware */
43bddb26 1958 tb_discover_tunnels(tb);
b60e31bf
SM
1959 /* Add DP resources from the DP tunnels created by the boot firmware */
1960 tb_discover_dp_resources(tb);
e6f81858
RM
1961 /*
 1962 * If the boot firmware did not create USB 3.x tunnels, create
 1963 * them now for the whole topology.
1964 */
1965 tb_create_usb3_tunnels(tb->root_switch);
8afe909b
MW
1966 /* Add DP IN resources for the root switch */
1967 tb_add_dp_resources(tb->root_switch);
99cabbb0
MW
 1968 /* Make the discovered switches available to userspace */
1969 device_for_each_child(&tb->root_switch->dev, NULL,
1970 tb_scan_finalize_switch);
9da672a4 1971
d6cc51cd 1972 /* Allow tb_handle_hotplug to progress events */
9d3cce0b
MW
1973 tcm->hotplug_active = true;
1974 return 0;
d6cc51cd
AN
1975}
1976
9d3cce0b 1977static int tb_suspend_noirq(struct tb *tb)
23dd5bb4 1978{
9d3cce0b
MW
1979 struct tb_cm *tcm = tb_priv(tb);
1980
daa5140f 1981 tb_dbg(tb, "suspending...\n");
81a2e3e4 1982 tb_disconnect_and_release_dp(tb);
6ac6faee 1983 tb_switch_suspend(tb->root_switch, false);
9d3cce0b 1984 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
daa5140f 1985 tb_dbg(tb, "suspend finished\n");
9d3cce0b
MW
1986
1987 return 0;
23dd5bb4
AN
1988}
1989
91c0c120
MW
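/*
 * Re-establish the software state of @sw and everything downstream of it
 * after resume: re-enable CLx, reconfigure the TMU, restore lane bonding
 * and link configuration, and reconfigure XDomain ports.
 */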
1990static void tb_restore_children(struct tb_switch *sw)
1991{
1992 struct tb_port *port;
990f4b85 1993 int ret;
91c0c120 1994
6ac6faee
MW
1995 /* No need to restore if the router is already unplugged */
1996 if (sw->is_unplugged)
1997 return;
1998
a28ec0e1 1999 /*
b017a46d
GF
2000 * CL0s and CL1 are enabled and supported together.
 2001 * If CLx is not supported, silently ignore re-enabling it.
a28ec0e1 2002 */
b017a46d
GF
2003 ret = tb_switch_enable_clx(sw, TB_CL1);
2004 if (ret && ret != -EOPNOTSUPP)
2005 tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
2006 tb_switch_clx_name(TB_CL1));
2007
2008 if (tb_switch_is_clx_enabled(sw, TB_CL1))
2009 /*
 2010 * To support the highest CLx state, we set the router's TMU
 2011 * to Normal-Uni mode.
2012 */
2013 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
2014 else
 2015 /* If CLx is disabled, configure the router's TMU to HiFi-Bidir mode */
2016 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
2017
cf29b9af
RM
2018 if (tb_enable_tmu(sw))
2019 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2020
91c0c120 2021 tb_switch_for_each_port(sw, port) {
284652a4 2022 if (!tb_port_has_remote(port) && !port->xdomain)
91c0c120
MW
2023 continue;
2024
284652a4
MW
2025 if (port->remote) {
2026 tb_switch_lane_bonding_enable(port->remote->sw);
2027 tb_switch_configure_link(port->remote->sw);
91c0c120 2028
284652a4
MW
2029 tb_restore_children(port->remote->sw);
2030 } else if (port->xdomain) {
f9cad07b 2031 tb_port_configure_xdomain(port, port->xdomain);
284652a4 2032 }
91c0c120
MW
2033 }
2034}
2035
9d3cce0b 2036static int tb_resume_noirq(struct tb *tb)
23dd5bb4 2037{
9d3cce0b 2038 struct tb_cm *tcm = tb_priv(tb);
93f36ade 2039 struct tb_tunnel *tunnel, *n;
43bddb26
MW
2040 unsigned int usb3_delay = 0;
2041 LIST_HEAD(tunnels);
9d3cce0b 2042
daa5140f 2043 tb_dbg(tb, "resuming...\n");
23dd5bb4
AN
2044
 2045 /* Remove any PCI devices the firmware might have set up */
356b6c4e 2046 tb_switch_reset(tb->root_switch);
23dd5bb4
AN
2047
2048 tb_switch_resume(tb->root_switch);
2049 tb_free_invalid_tunnels(tb);
2050 tb_free_unplugged_children(tb->root_switch);
91c0c120 2051 tb_restore_children(tb->root_switch);
43bddb26
MW
2052
2053 /*
 2054 * If we get here from suspend-to-disk, the boot firmware or the
 2055 * restore kernel might have created tunnels of their own. Since
 2056 * we cannot be sure they are usable for us, we find and tear
 2057 * them down.
2058 */
2059 tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2060 list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
2061 if (tb_tunnel_is_usb3(tunnel))
2062 usb3_delay = 500;
2063 tb_tunnel_deactivate(tunnel);
2064 tb_tunnel_free(tunnel);
2065 }
2066
2067 /* Re-create our tunnels now */
2068 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
 2070 /* USB3 requires a delay before it can be re-activated */
2070 if (tb_tunnel_is_usb3(tunnel)) {
2071 msleep(usb3_delay);
2072 /* Only need to do it once */
2073 usb3_delay = 0;
2074 }
93f36ade 2075 tb_tunnel_restart(tunnel);
43bddb26 2076 }
9d3cce0b 2077 if (!list_empty(&tcm->tunnel_list)) {
23dd5bb4
AN
2078 /*
 2079 * The PCIe links need some time to come back up;
 2080 * empirically 100ms is enough.
2081 */
daa5140f 2082 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
23dd5bb4
AN
2083 msleep(100);
2084 }
2085 /* Allow tb_handle_hotplug to progress events */
9d3cce0b 2086 tcm->hotplug_active = true;
daa5140f 2087 tb_dbg(tb, "resume finished\n");
9d3cce0b
MW
2088
2089 return 0;
2090}
2091
7ea4cd6b
MW
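/*
 * Remove XDomain connections that were unplugged while we were
 * suspended. Returns the number of removed XDomains so the caller knows
 * whether a rescan is needed.
 */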
2092static int tb_free_unplugged_xdomains(struct tb_switch *sw)
2093{
b433d010
MW
2094 struct tb_port *port;
2095 int ret = 0;
7ea4cd6b 2096
b433d010 2097 tb_switch_for_each_port(sw, port) {
7ea4cd6b
MW
2098 if (tb_is_upstream_port(port))
2099 continue;
2100 if (port->xdomain && port->xdomain->is_unplugged) {
dacb1287 2101 tb_retimer_remove_all(port);
7ea4cd6b 2102 tb_xdomain_remove(port->xdomain);
284652a4 2103 tb_port_unconfigure_xdomain(port);
7ea4cd6b
MW
2104 port->xdomain = NULL;
2105 ret++;
2106 } else if (port->remote) {
2107 ret += tb_free_unplugged_xdomains(port->remote->sw);
2108 }
2109 }
2110
2111 return ret;
2112}
2113
884e4d57
MW
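/*
 * For hibernation freeze/thaw it is enough to pause and resume hotplug
 * event handling; tunnels are left untouched.
 */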
2114static int tb_freeze_noirq(struct tb *tb)
2115{
2116 struct tb_cm *tcm = tb_priv(tb);
2117
2118 tcm->hotplug_active = false;
2119 return 0;
2120}
2121
2122static int tb_thaw_noirq(struct tb *tb)
2123{
2124 struct tb_cm *tcm = tb_priv(tb);
2125
2126 tcm->hotplug_active = true;
2127 return 0;
2128}
2129
7ea4cd6b
MW
2130static void tb_complete(struct tb *tb)
2131{
2132 /*
 2133 * Release any unplugged XDomains. If another domain has been
 2134 * swapped in place of an unplugged XDomain, we need to run
 2135 * another rescan.
2136 */
2137 mutex_lock(&tb->lock);
2138 if (tb_free_unplugged_xdomains(tb->root_switch))
2139 tb_scan_switch(tb->root_switch);
2140 mutex_unlock(&tb->lock);
2141}
2142
6ac6faee
MW
2143static int tb_runtime_suspend(struct tb *tb)
2144{
2145 struct tb_cm *tcm = tb_priv(tb);
2146
2147 mutex_lock(&tb->lock);
2148 tb_switch_suspend(tb->root_switch, true);
2149 tcm->hotplug_active = false;
2150 mutex_unlock(&tb->lock);
2151
2152 return 0;
2153}
2154
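/*
 * Delayed work scheduled from runtime resume to remove any routers and
 * XDomains that were unplugged while the domain was runtime suspended.
 */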
2155static void tb_remove_work(struct work_struct *work)
2156{
2157 struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
2158 struct tb *tb = tcm_to_tb(tcm);
2159
2160 mutex_lock(&tb->lock);
2161 if (tb->root_switch) {
2162 tb_free_unplugged_children(tb->root_switch);
2163 tb_free_unplugged_xdomains(tb->root_switch);
2164 }
2165 mutex_unlock(&tb->lock);
2166}
2167
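/*
 * Runtime resume: resume the routers, drop invalid tunnels, restore the
 * remaining ones and re-enable hotplug handling. Cleanup of unplugged
 * devices is deferred to tb_remove_work() to avoid a deadlock.
 */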
2168static int tb_runtime_resume(struct tb *tb)
2169{
2170 struct tb_cm *tcm = tb_priv(tb);
2171 struct tb_tunnel *tunnel, *n;
2172
2173 mutex_lock(&tb->lock);
2174 tb_switch_resume(tb->root_switch);
2175 tb_free_invalid_tunnels(tb);
2176 tb_restore_children(tb->root_switch);
2177 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
2178 tb_tunnel_restart(tunnel);
2179 tcm->hotplug_active = true;
2180 mutex_unlock(&tb->lock);
2181
2182 /*
2183 * Schedule cleanup of any unplugged devices. Run this in a
2184 * separate thread to avoid possible deadlock if the device
2185 * removal runtime resumes the unplugged device.
2186 */
2187 queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
2188 return 0;
2189}
2190
9d3cce0b
MW
2191static const struct tb_cm_ops tb_cm_ops = {
2192 .start = tb_start,
2193 .stop = tb_stop,
2194 .suspend_noirq = tb_suspend_noirq,
2195 .resume_noirq = tb_resume_noirq,
884e4d57
MW
2196 .freeze_noirq = tb_freeze_noirq,
2197 .thaw_noirq = tb_thaw_noirq,
7ea4cd6b 2198 .complete = tb_complete,
6ac6faee
MW
2199 .runtime_suspend = tb_runtime_suspend,
2200 .runtime_resume = tb_runtime_resume,
81a54b5e 2201 .handle_event = tb_handle_event,
3da88be2 2202 .disapprove_switch = tb_disconnect_pci,
99cabbb0 2203 .approve_switch = tb_tunnel_pci,
7ea4cd6b
MW
2204 .approve_xdomain_paths = tb_approve_xdomain_paths,
2205 .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
9d3cce0b
MW
2206};
2207
349bfe08
MW
2208/*
 2209 * During suspend the Thunderbolt controller is reset and all PCIe
 2210 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 2211 * during resume. This adds device links between the tunneled PCIe
 2212 * downstream ports and the NHI so that the device core makes sure
 2213 * the NHI is resumed before the rest.
2214 */
2215static void tb_apple_add_links(struct tb_nhi *nhi)
2216{
2217 struct pci_dev *upstream, *pdev;
2218
2219 if (!x86_apple_machine)
2220 return;
2221
2222 switch (nhi->pdev->device) {
2223 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2224 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2225 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2226 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2227 break;
2228 default:
2229 return;
2230 }
2231
2232 upstream = pci_upstream_bridge(nhi->pdev);
2233 while (upstream) {
2234 if (!pci_is_pcie(upstream))
2235 return;
2236 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2237 break;
2238 upstream = pci_upstream_bridge(upstream);
2239 }
2240
2241 if (!upstream)
2242 return;
2243
2244 /*
 2245 * For each hotplug downstream port, add a device link back
 2246 * to the NHI so that PCIe tunnels can be re-established after
 2247 * sleep.
2248 */
2249 for_each_pci_bridge(pdev, upstream->subordinate) {
2250 const struct device_link *link;
2251
2252 if (!pci_is_pcie(pdev))
2253 continue;
2254 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2255 !pdev->is_hotplug_bridge)
2256 continue;
2257
2258 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2259 DL_FLAG_AUTOREMOVE_SUPPLIER |
2260 DL_FLAG_PM_RUNTIME);
2261 if (link) {
2262 dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2263 dev_name(&pdev->dev));
2264 } else {
2265 dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2266 dev_name(&pdev->dev));
2267 }
2268 }
2269}
2270
9d3cce0b
MW
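/*
 * Entry point for the software connection manager, typically invoked by
 * the NHI driver when no firmware connection manager is in charge.
 * Allocates the domain, selects the security level based on ACPI and
 * sets up the connection manager state.
 */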
2271struct tb *tb_probe(struct tb_nhi *nhi)
2272{
2273 struct tb_cm *tcm;
2274 struct tb *tb;
2275
7f0a34d7 2276 tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
9d3cce0b
MW
2277 if (!tb)
2278 return NULL;
2279
c6da62a2
MW
2280 if (tb_acpi_may_tunnel_pcie())
2281 tb->security_level = TB_SECURITY_USER;
2282 else
2283 tb->security_level = TB_SECURITY_NOPCIE;
2284
9d3cce0b
MW
2285 tb->cm_ops = &tb_cm_ops;
2286
2287 tcm = tb_priv(tb);
2288 INIT_LIST_HEAD(&tcm->tunnel_list);
8afe909b 2289 INIT_LIST_HEAD(&tcm->dp_resources);
6ac6faee 2290 INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
6ce35635 2291 tb_init_bandwidth_groups(tcm);
9d3cce0b 2292
e0258805
MW
2293 tb_dbg(tb, "using software connection manager\n");
2294
349bfe08
MW
2295 tb_apple_add_links(nhi);
2296 tb_acpi_add_links(nhi);
2297
9d3cce0b 2298 return tb;
23dd5bb4 2299}