thunderbolt: Handle bandwidth allocation mode enablement notification
[linux-block.git] / drivers / thunderbolt / tb.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
d6cc51cd 2/*
99cabbb0 3 * Thunderbolt driver - bus logic (NHI independent)
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
99cabbb0 6 * Copyright (C) 2019, Intel Corporation
7 */
8
9#include <linux/slab.h>
10#include <linux/errno.h>
11#include <linux/delay.h>
6ac6faee 12#include <linux/pm_runtime.h>
349bfe08 13#include <linux/platform_data/x86/apple.h>
14
15#include "tb.h"
7adf6097 16#include "tb_regs.h"
1752b9f7 17#include "tunnel.h"
d6cc51cd 18
19#define TB_TIMEOUT 100 /* ms */
20#define MAX_GROUPS 7 /* max Group_ID is 7 */
7f0a34d7 21
22/**
23 * struct tb_cm - Simple Thunderbolt connection manager
24 * @tunnel_list: List of active tunnels
8afe909b 25 * @dp_resources: List of available DP resources for DP tunneling
26 * @hotplug_active: tb_handle_hotplug will stop progressing plug
27 * events and exit if this is not set (it needs to
28 * acquire the lock one more time). Used to drain wq
29 * after cfg has been paused.
30 * @remove_work: Work used to remove any unplugged routers after
31 * runtime resume
6ce35635 32 * @groups: Bandwidth groups used in this domain.
33 */
34struct tb_cm {
35 struct list_head tunnel_list;
8afe909b 36 struct list_head dp_resources;
9d3cce0b 37 bool hotplug_active;
6ac6faee 38 struct delayed_work remove_work;
6ce35635 39 struct tb_bandwidth_group groups[MAX_GROUPS];
9d3cce0b 40};
9da672a4 41
42static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
43{
44 return ((void *)tcm - sizeof(struct tb));
45}
46
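/*
 * Deferred event queued to tb->wq: @route and @port identify the
 * adapter behind a hot plug or bandwidth allocation notification.
 */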
47struct tb_hotplug_event {
48 struct work_struct work;
49 struct tb *tb;
50 u64 route;
51 u8 port;
52 bool unplug;
53};
54
55static void tb_init_bandwidth_groups(struct tb_cm *tcm)
56{
57 int i;
58
59 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
60 struct tb_bandwidth_group *group = &tcm->groups[i];
61
62 group->tb = tcm_to_tb(tcm);
63 group->index = i + 1;
64 INIT_LIST_HEAD(&group->ports);
65 }
66}
67
68static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
69 struct tb_port *in)
70{
71 if (!group || WARN_ON(in->group))
72 return;
73
74 in->group = group;
75 list_add_tail(&in->group_list, &group->ports);
76
77 tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
78}
79
80static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
81{
82 int i;
83
84 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
85 struct tb_bandwidth_group *group = &tcm->groups[i];
86
87 if (list_empty(&group->ports))
88 return group;
89 }
90
91 return NULL;
92}
93
94static struct tb_bandwidth_group *
95tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
96 struct tb_port *out)
97{
98 struct tb_bandwidth_group *group;
99 struct tb_tunnel *tunnel;
100
101 /*
102 * Find all DP tunnels that go through all the same USB4 links
 103 * as this one. Because we always set up tunnels the same way we
104 * can just check for the routers at both ends of the tunnels
105 * and if they are the same we have a match.
106 */
107 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
108 if (!tb_tunnel_is_dp(tunnel))
109 continue;
110
111 if (tunnel->src_port->sw == in->sw &&
112 tunnel->dst_port->sw == out->sw) {
113 group = tunnel->src_port->group;
114 if (group) {
115 tb_bandwidth_group_attach_port(group, in);
116 return group;
117 }
118 }
119 }
120
121 /* Pick up next available group then */
122 group = tb_find_free_bandwidth_group(tcm);
123 if (group)
124 tb_bandwidth_group_attach_port(group, in);
125 else
126 tb_port_warn(in, "no available bandwidth groups\n");
127
128 return group;
129}
130
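/*
 * If the boot firmware already enabled bandwidth allocation mode on the
 * DP IN adapter, re-attach it to the bandwidth group whose ID is
 * programmed in the adapter; otherwise fall back to the normal group
 * selection done by tb_attach_bandwidth_group().
 */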
131static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
132 struct tb_port *out)
133{
134 if (usb4_dp_port_bw_mode_enabled(in)) {
135 int index, i;
136
137 index = usb4_dp_port_group_id(in);
138 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
139 if (tcm->groups[i].index == index) {
140 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
141 return;
142 }
143 }
144 }
145
146 tb_attach_bandwidth_group(tcm, in, out);
147}
148
149static void tb_detach_bandwidth_group(struct tb_port *in)
150{
151 struct tb_bandwidth_group *group = in->group;
152
153 if (group) {
154 in->group = NULL;
155 list_del_init(&in->group_list);
156
157 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
158 }
159}
160
161static void tb_handle_hotplug(struct work_struct *work);
162
163static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
164{
165 struct tb_hotplug_event *ev;
166
167 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
168 if (!ev)
169 return;
170
171 ev->tb = tb;
172 ev->route = route;
173 ev->port = port;
174 ev->unplug = unplug;
175 INIT_WORK(&ev->work, tb_handle_hotplug);
176 queue_work(tb->wq, &ev->work);
177}
178
179/* enumeration & hot plug handling */
180
181static void tb_add_dp_resources(struct tb_switch *sw)
182{
183 struct tb_cm *tcm = tb_priv(sw->tb);
184 struct tb_port *port;
185
186 tb_switch_for_each_port(sw, port) {
187 if (!tb_port_is_dpin(port))
188 continue;
189
190 if (!tb_switch_query_dp_resource(sw, port))
191 continue;
192
193 list_add_tail(&port->list, &tcm->dp_resources);
194 tb_port_dbg(port, "DP IN resource available\n");
195 }
196}
197
198static void tb_remove_dp_resources(struct tb_switch *sw)
199{
200 struct tb_cm *tcm = tb_priv(sw->tb);
201 struct tb_port *port, *tmp;
202
203 /* Clear children resources first */
204 tb_switch_for_each_port(sw, port) {
205 if (tb_port_has_remote(port))
206 tb_remove_dp_resources(port->remote->sw);
207 }
208
209 list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
210 if (port->sw == sw) {
211 tb_port_dbg(port, "DP OUT resource unavailable\n");
212 list_del_init(&port->list);
213 }
214 }
215}
216
217static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
218{
219 struct tb_cm *tcm = tb_priv(tb);
220 struct tb_port *p;
221
222 list_for_each_entry(p, &tcm->dp_resources, list) {
223 if (p == port)
224 return;
225 }
226
227 tb_port_dbg(port, "DP %s resource available discovered\n",
228 tb_port_is_dpin(port) ? "IN" : "OUT");
229 list_add_tail(&port->list, &tcm->dp_resources);
230}
231
232static void tb_discover_dp_resources(struct tb *tb)
233{
234 struct tb_cm *tcm = tb_priv(tb);
235 struct tb_tunnel *tunnel;
236
237 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
238 if (tb_tunnel_is_dp(tunnel))
239 tb_discover_dp_resource(tb, tunnel->dst_port);
240 }
241}
242
243static void tb_switch_discover_tunnels(struct tb_switch *sw,
244 struct list_head *list,
245 bool alloc_hopids)
246{
247 struct tb *tb = sw->tb;
0414bec5 248 struct tb_port *port;
0414bec5 249
b433d010 250 tb_switch_for_each_port(sw, port) {
251 struct tb_tunnel *tunnel = NULL;
252
0414bec5 253 switch (port->config.type) {
4f807e47 254 case TB_TYPE_DP_HDMI_IN:
43bddb26 255 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
256 /*
 257 * If a DP tunnel exists, change the TMU mode of the host
 258 * router's first-level children to HiFi so that CL0s works.
259 */
260 if (tunnel)
261 tb_switch_enable_tmu_1st_child(tb->root_switch,
262 TB_SWITCH_TMU_RATE_HIFI);
263 break;
264
0414bec5 265 case TB_TYPE_PCIE_DOWN:
43bddb26 266 tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
267 break;
268
e6f81858 269 case TB_TYPE_USB3_DOWN:
43bddb26 270 tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
271 break;
272
273 default:
274 break;
275 }
276
277 if (tunnel)
278 list_add_tail(&tunnel->list, list);
279 }
4f807e47 280
281 tb_switch_for_each_port(sw, port) {
282 if (tb_port_has_remote(port)) {
283 tb_switch_discover_tunnels(port->remote->sw, list,
284 alloc_hopids);
285 }
286 }
287}
288
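/*
 * Records the tunnels set up by the boot firmware, marks the routers
 * behind discovered PCIe tunnels as boot-configured and attaches
 * discovered DP tunnels to bandwidth groups while keeping both ends of
 * each DP tunnel runtime resumed.
 */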
289static void tb_discover_tunnels(struct tb *tb)
290{
291 struct tb_cm *tcm = tb_priv(tb);
292 struct tb_tunnel *tunnel;
293
294 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
295
296 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
4f807e47 297 if (tb_tunnel_is_pci(tunnel)) {
298 struct tb_switch *parent = tunnel->dst_port->sw;
299
300 while (parent != tunnel->src_port->sw) {
301 parent->boot = true;
302 parent = tb_switch_parent(parent);
303 }
c94732bd 304 } else if (tb_tunnel_is_dp(tunnel)) {
305 struct tb_port *in = tunnel->src_port;
306 struct tb_port *out = tunnel->dst_port;
307
c94732bd 308 /* Keep the domain from powering down */
309 pm_runtime_get_sync(&in->sw->dev);
310 pm_runtime_get_sync(&out->sw->dev);
311
312 tb_discover_bandwidth_group(tcm, in, out);
0414bec5 313 }
314 }
315}
9da672a4 316
f9cad07b 317static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
318{
319 if (tb_switch_is_usb4(port->sw))
f9cad07b 320 return usb4_port_configure_xdomain(port, xd);
321 return tb_lc_configure_xdomain(port);
322}
323
324static void tb_port_unconfigure_xdomain(struct tb_port *port)
325{
326 if (tb_switch_is_usb4(port->sw))
327 usb4_port_unconfigure_xdomain(port);
328 else
329 tb_lc_unconfigure_xdomain(port);
330
331 tb_port_enable(port->dual_link_port);
332}
333
334static void tb_scan_xdomain(struct tb_port *port)
335{
336 struct tb_switch *sw = port->sw;
337 struct tb *tb = sw->tb;
338 struct tb_xdomain *xd;
339 u64 route;
340
341 if (!tb_is_xdomain_enabled())
342 return;
343
344 route = tb_downstream_route(port);
345 xd = tb_xdomain_find_by_route(tb, route);
346 if (xd) {
347 tb_xdomain_put(xd);
348 return;
349 }
350
351 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
352 NULL);
353 if (xd) {
354 tb_port_at(route, sw)->xdomain = xd;
f9cad07b 355 tb_port_configure_xdomain(port, xd);
356 tb_xdomain_add(xd);
357 }
358}
359
360static int tb_enable_tmu(struct tb_switch *sw)
361{
362 int ret;
363
364 /* If it is already enabled in correct mode, don't touch it */
b017a46d 365 if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
366 return 0;
367
368 ret = tb_switch_tmu_disable(sw);
369 if (ret)
370 return ret;
371
372 ret = tb_switch_tmu_post_time(sw);
373 if (ret)
374 return ret;
375
376 return tb_switch_tmu_enable(sw);
377}
378
379/**
380 * tb_find_unused_port() - return the first inactive port on @sw
381 * @sw: Switch to find the port on
382 * @type: Port type to look for
383 */
384static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
385 enum tb_port_type type)
386{
387 struct tb_port *port;
388
389 tb_switch_for_each_port(sw, port) {
390 if (tb_is_upstream_port(port))
391 continue;
392 if (port->config.type != type)
393 continue;
394 if (!port->cap_adap)
395 continue;
396 if (tb_port_is_enabled(port))
397 continue;
398 return port;
399 }
400 return NULL;
401}
402
403static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
77cfa40f 404 const struct tb_port *port)
405{
406 struct tb_port *down;
407
408 down = usb4_switch_map_usb3_down(sw, port);
77cfa40f 409 if (down && !tb_usb3_port_is_enabled(down))
e6f81858 410 return down;
77cfa40f 411 return NULL;
412}
413
414static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
415 struct tb_port *src_port,
416 struct tb_port *dst_port)
417{
418 struct tb_cm *tcm = tb_priv(tb);
419 struct tb_tunnel *tunnel;
420
421 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
422 if (tunnel->type == type &&
423 ((src_port && src_port == tunnel->src_port) ||
424 (dst_port && dst_port == tunnel->dst_port))) {
425 return tunnel;
426 }
427 }
428
429 return NULL;
430}
431
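/*
 * Returns the USB3 tunnel that starts from the host router and runs
 * towards the branch containing @src_port/@dst_port, or NULL if the
 * deeper end is the host router itself or no such tunnel exists.
 */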
432static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
433 struct tb_port *src_port,
434 struct tb_port *dst_port)
435{
436 struct tb_port *port, *usb3_down;
437 struct tb_switch *sw;
438
439 /* Pick the router that is deepest in the topology */
440 if (dst_port->sw->config.depth > src_port->sw->config.depth)
441 sw = dst_port->sw;
442 else
443 sw = src_port->sw;
444
445 /* Can't be the host router */
446 if (sw == tb->root_switch)
447 return NULL;
448
449 /* Find the downstream USB4 port that leads to this router */
450 port = tb_port_at(tb_route(sw), tb->root_switch);
451 /* Find the corresponding host router USB3 downstream port */
452 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
453 if (!usb3_down)
454 return NULL;
455
456 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
457}
458
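/*
 * Walks the path between @src_port and @dst_port and returns the
 * smallest bandwidth left on any link: each link starts from its raw
 * bandwidth minus a 10% guard band, from which the bandwidth consumed
 * by other DP tunnels crossing the link and by the first hop USB3
 * tunnel is subtracted.
 */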
459static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
460 struct tb_port *dst_port, int *available_up, int *available_down)
461{
462 int usb3_consumed_up, usb3_consumed_down, ret;
463 struct tb_cm *tcm = tb_priv(tb);
464 struct tb_tunnel *tunnel;
465 struct tb_port *port;
466
467 tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
468 tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
469 dst_port->port);
470
471 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
472 if (tunnel && tunnel->src_port != src_port &&
473 tunnel->dst_port != dst_port) {
474 ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
475 &usb3_consumed_down);
476 if (ret)
477 return ret;
478 } else {
479 usb3_consumed_up = 0;
480 usb3_consumed_down = 0;
481 }
482
483 *available_up = *available_down = 40000;
484
485 /* Find the minimum available bandwidth over all links */
486 tb_for_each_port_on_path(src_port, dst_port, port) {
487 int link_speed, link_width, up_bw, down_bw;
488
489 if (!tb_port_is_null(port))
490 continue;
491
492 if (tb_is_upstream_port(port)) {
493 link_speed = port->sw->link_speed;
494 } else {
495 link_speed = tb_port_get_link_speed(port);
496 if (link_speed < 0)
497 return link_speed;
498 }
499
500 link_width = port->bonded ? 2 : 1;
501
502 up_bw = link_speed * link_width * 1000; /* Mb/s */
503 /* Leave 10% guard band */
504 up_bw -= up_bw / 10;
505 down_bw = up_bw;
506
507 tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
508 down_bw);
509
510 /*
511 * Find all DP tunnels that cross the port and reduce
512 * their consumed bandwidth from the available.
513 */
514 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
515 int dp_consumed_up, dp_consumed_down;
516
517 if (tb_tunnel_is_invalid(tunnel))
518 continue;
519
520 if (!tb_tunnel_is_dp(tunnel))
521 continue;
522
523 if (!tb_tunnel_port_on_path(tunnel, port))
524 continue;
525
526 /*
527 * Ignore the DP tunnel between src_port and
528 * dst_port because it is the same tunnel and we
529 * may be re-calculating estimated bandwidth.
530 */
531 if (tunnel->src_port == src_port &&
532 tunnel->dst_port == dst_port)
533 continue;
534
535 ret = tb_tunnel_consumed_bandwidth(tunnel,
536 &dp_consumed_up,
537 &dp_consumed_down);
538 if (ret)
539 return ret;
540
541 up_bw -= dp_consumed_up;
542 down_bw -= dp_consumed_down;
543 }
544
545 /*
546 * If USB3 is tunneled from the host router down to the
547 * branch leading to port we need to take USB3 consumed
 548 * bandwidth into account regardless of whether it actually
549 * crosses the port.
550 */
551 up_bw -= usb3_consumed_up;
552 down_bw -= usb3_consumed_down;
553
554 if (up_bw < *available_up)
555 *available_up = up_bw;
556 if (down_bw < *available_down)
557 *available_down = down_bw;
558 }
559
560 if (*available_up < 0)
561 *available_up = 0;
562 if (*available_down < 0)
563 *available_down = 0;
564
565 return 0;
566}
567
568static int tb_release_unused_usb3_bandwidth(struct tb *tb,
569 struct tb_port *src_port,
570 struct tb_port *dst_port)
571{
572 struct tb_tunnel *tunnel;
573
574 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
575 return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
576}
577
578static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
579 struct tb_port *dst_port)
580{
581 int ret, available_up, available_down;
582 struct tb_tunnel *tunnel;
583
584 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
585 if (!tunnel)
586 return;
587
588 tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
589
590 /*
591 * Calculate available bandwidth for the first hop USB3 tunnel.
592 * That determines the whole USB3 bandwidth for this branch.
593 */
594 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
595 &available_up, &available_down);
596 if (ret) {
597 tb_warn(tb, "failed to calculate available bandwidth\n");
598 return;
599 }
600
601 tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
602 available_up, available_down);
603
604 tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
605}
606
607static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
608{
609 struct tb_switch *parent = tb_switch_parent(sw);
0bd680cd 610 int ret, available_up, available_down;
611 struct tb_port *up, *down, *port;
612 struct tb_cm *tcm = tb_priv(tb);
613 struct tb_tunnel *tunnel;
614
615 if (!tb_acpi_may_tunnel_usb3()) {
616 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
617 return 0;
618 }
619
620 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
621 if (!up)
622 return 0;
623
624 if (!sw->link_usb4)
625 return 0;
626
627 /*
628 * Look up available down port. Since we are chaining it should
629 * be found right above this switch.
630 */
631 port = tb_port_at(tb_route(sw), parent);
632 down = tb_find_usb3_down(parent, port);
633 if (!down)
634 return 0;
635
636 if (tb_route(parent)) {
637 struct tb_port *parent_up;
638 /*
639 * Check first that the parent switch has its upstream USB3
640 * port enabled. Otherwise the chain is not complete and
641 * there is no point setting up a new tunnel.
642 */
643 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
644 if (!parent_up || !tb_port_is_enabled(parent_up))
645 return 0;
646
647 /* Make all unused bandwidth available for the new tunnel */
648 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
649 if (ret)
650 return ret;
651 }
652
653 ret = tb_available_bandwidth(tb, down, up, &available_up,
654 &available_down);
655 if (ret)
656 goto err_reclaim;
657
658 tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
659 available_up, available_down);
660
661 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
662 available_down);
663 if (!tunnel) {
664 ret = -ENOMEM;
665 goto err_reclaim;
666 }
667
668 if (tb_tunnel_activate(tunnel)) {
669 tb_port_info(up,
670 "USB3 tunnel activation failed, aborting\n");
671 ret = -EIO;
672 goto err_free;
673 }
674
675 list_add_tail(&tunnel->list, &tcm->tunnel_list);
676 if (tb_route(parent))
677 tb_reclaim_usb3_bandwidth(tb, down, up);
678
e6f81858 679 return 0;
680
681err_free:
682 tb_tunnel_free(tunnel);
683err_reclaim:
684 if (tb_route(parent))
685 tb_reclaim_usb3_bandwidth(tb, down, up);
686
687 return ret;
688}
689
690static int tb_create_usb3_tunnels(struct tb_switch *sw)
691{
692 struct tb_port *port;
693 int ret;
694
695 if (!tb_acpi_may_tunnel_usb3())
696 return 0;
697
698 if (tb_route(sw)) {
699 ret = tb_tunnel_usb3(sw->tb, sw);
700 if (ret)
701 return ret;
702 }
703
704 tb_switch_for_each_port(sw, port) {
705 if (!tb_port_has_remote(port))
706 continue;
707 ret = tb_create_usb3_tunnels(port->remote->sw);
708 if (ret)
709 return ret;
710 }
711
712 return 0;
713}
714
715static void tb_scan_port(struct tb_port *port);
716
877e50b3 717/*
718 * tb_scan_switch() - scan for and initialize downstream switches
719 */
720static void tb_scan_switch(struct tb_switch *sw)
721{
722 struct tb_port *port;
723
724 pm_runtime_get_sync(&sw->dev);
725
726 tb_switch_for_each_port(sw, port)
727 tb_scan_port(port);
728
729 pm_runtime_mark_last_busy(&sw->dev);
730 pm_runtime_put_autosuspend(&sw->dev);
731}
732
877e50b3 733/*
734 * tb_scan_port() - check for and initialize switches below port
735 */
736static void tb_scan_port(struct tb_port *port)
737{
99cabbb0 738 struct tb_cm *tcm = tb_priv(port->sw->tb);
dfe40ca4 739 struct tb_port *upstream_port;
9da672a4 740 struct tb_switch *sw;
990f4b85 741 int ret;
dfe40ca4 742
743 if (tb_is_upstream_port(port))
744 return;
745
746 if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
747 !tb_dp_port_is_enabled(port)) {
748 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
749 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
750 false);
751 return;
752 }
753
754 if (port->config.type != TB_TYPE_PORT)
755 return;
756 if (port->dual_link_port && port->link_nr)
757 return; /*
758 * Downstream switch is reachable through two ports.
759 * Only scan on the primary port (link_nr == 0).
760 */
761 if (tb_wait_for_port(port, false) <= 0)
762 return;
763 if (port->remote) {
7ea4cd6b 764 tb_port_dbg(port, "port already has a remote\n");
765 return;
766 }
dacb1287 767
3fb10ea4 768 tb_retimer_scan(port, true);
dacb1287 769
770 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
771 tb_downstream_route(port));
772 if (IS_ERR(sw)) {
773 /*
774 * If there is an error accessing the connected switch
775 * it may be connected to another domain. Also we allow
776 * the other domain to be connected to a max depth switch.
777 */
778 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
779 tb_scan_xdomain(port);
9da672a4 780 return;
7ea4cd6b 781 }
782
783 if (tb_switch_configure(sw)) {
784 tb_switch_put(sw);
785 return;
786 }
787
788 /*
789 * If there was previously another domain connected remove it
790 * first.
791 */
792 if (port->xdomain) {
793 tb_xdomain_remove(port->xdomain);
284652a4 794 tb_port_unconfigure_xdomain(port);
795 port->xdomain = NULL;
796 }
797
798 /*
799 * Do not send uevents until we have discovered all existing
800 * tunnels and know which switches were authorized already by
801 * the boot firmware.
802 */
803 if (!tcm->hotplug_active)
804 dev_set_uevent_suppress(&sw->dev, true);
f67cf491 805
806 /*
 807 * At the moment we can support runtime PM only on Thunderbolt 2
 808 * and beyond (devices with LC).
809 */
810 sw->rpm = sw->generation > 1;
811
812 if (tb_switch_add(sw)) {
813 tb_switch_put(sw);
814 return;
815 }
816
817 /* Link the switches using both links if available */
818 upstream_port = tb_upstream_port(sw);
819 port->remote = upstream_port;
820 upstream_port->remote = port;
821 if (port->dual_link_port && upstream_port->dual_link_port) {
822 port->dual_link_port->remote = upstream_port->dual_link_port;
823 upstream_port->dual_link_port->remote = port->dual_link_port;
824 }
825
91c0c120 826 /* Enable lane bonding if supported */
2ca3263a 827 tb_switch_lane_bonding_enable(sw);
828 /* Set the link configured */
829 tb_switch_configure_link(sw);
830 /*
831 * CL0s and CL1 are enabled and supported together.
832 * Silently ignore CLx enabling in case CLx is not supported.
833 */
834 ret = tb_switch_enable_clx(sw, TB_CL1);
990f4b85 835 if (ret && ret != -EOPNOTSUPP)
836 tb_sw_warn(sw, "failed to enable %s on upstream port\n",
837 tb_switch_clx_name(TB_CL1));
8a90e4fa 838
839 if (tb_switch_is_clx_enabled(sw, TB_CL1))
840 /*
841 * To support highest CLx state, we set router's TMU to
842 * Normal-Uni mode.
843 */
844 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
845 else
 846 /* If CLx is disabled, configure the router's TMU to HiFi-Bidir mode */
847 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
91c0c120 848
849 if (tb_enable_tmu(sw))
850 tb_sw_warn(sw, "failed to enable TMU\n");
851
dacb1287 852 /* Scan upstream retimers */
3fb10ea4 853 tb_retimer_scan(upstream_port, true);
dacb1287 854
855 /*
 856 * Create USB 3.x tunnels only when the switch is plugged into the
 857 * domain. This is because we also scan the domain during discovery
 858 * and want to discover the existing USB 3.x tunnels before we create
 859 * any new ones.
860 */
861 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
862 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
863
e876f34a 864 tb_add_dp_resources(sw);
865 tb_scan_switch(sw);
866}
867
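/*
 * Deactivates @tunnel and releases whatever was reserved when it was
 * set up: for DP the bandwidth group, the DP IN resource and the
 * runtime PM references, and for DP/USB3 the now unused USB3
 * bandwidth is reclaimed.
 */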
868static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
869{
870 struct tb_port *src_port, *dst_port;
871 struct tb *tb;
872
873 if (!tunnel)
874 return;
875
876 tb_tunnel_deactivate(tunnel);
877 list_del(&tunnel->list);
878
879 tb = tunnel->tb;
880 src_port = tunnel->src_port;
881 dst_port = tunnel->dst_port;
882
883 switch (tunnel->type) {
884 case TB_TUNNEL_DP:
6ce35635 885 tb_detach_bandwidth_group(src_port);
886 /*
887 * In case of DP tunnel make sure the DP IN resource is
888 * deallocated properly.
889 */
890 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
891 /* Now we can allow the domain to runtime suspend again */
892 pm_runtime_mark_last_busy(&dst_port->sw->dev);
893 pm_runtime_put_autosuspend(&dst_port->sw->dev);
894 pm_runtime_mark_last_busy(&src_port->sw->dev);
895 pm_runtime_put_autosuspend(&src_port->sw->dev);
896 fallthrough;
897
898 case TB_TUNNEL_USB3:
899 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
900 break;
8afe909b 901
902 default:
903 /*
904 * PCIe and DMA tunnels do not consume guaranteed
905 * bandwidth.
906 */
907 break;
908 }
909
910 tb_tunnel_free(tunnel);
911}
912
877e50b3 913/*
914 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
915 */
916static void tb_free_invalid_tunnels(struct tb *tb)
917{
9d3cce0b 918 struct tb_cm *tcm = tb_priv(tb);
919 struct tb_tunnel *tunnel;
920 struct tb_tunnel *n;
921
922 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
923 if (tb_tunnel_is_invalid(tunnel))
924 tb_deactivate_and_free_tunnel(tunnel);
925 }
926}
927
877e50b3 928/*
929 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
930 */
931static void tb_free_unplugged_children(struct tb_switch *sw)
932{
b433d010 933 struct tb_port *port;
dfe40ca4 934
b433d010 935 tb_switch_for_each_port(sw, port) {
dfe40ca4 936 if (!tb_port_has_remote(port))
23dd5bb4 937 continue;
dfe40ca4 938
23dd5bb4 939 if (port->remote->sw->is_unplugged) {
dacb1287 940 tb_retimer_remove_all(port);
8afe909b 941 tb_remove_dp_resources(port->remote->sw);
de462039 942 tb_switch_unconfigure_link(port->remote->sw);
91c0c120 943 tb_switch_lane_bonding_disable(port->remote->sw);
bfe778ac 944 tb_switch_remove(port->remote->sw);
23dd5bb4 945 port->remote = NULL;
946 if (port->dual_link_port)
947 port->dual_link_port->remote = NULL;
948 } else {
949 tb_free_unplugged_children(port->remote->sw);
950 }
951 }
952}
953
954static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
955 const struct tb_port *port)
3364f0c1 956{
957 struct tb_port *down = NULL;
958
959 /*
960 * To keep plugging devices consistently in the same PCIe
b0407983 961 * hierarchy, do mapping here for switch downstream PCIe ports.
99cabbb0 962 */
963 if (tb_switch_is_usb4(sw)) {
964 down = usb4_switch_map_pcie_down(sw, port);
965 } else if (!tb_route(sw)) {
966 int phy_port = tb_phy_port_from_link(port->port);
967 int index;
9d3cce0b 968
969 /*
970 * Hard-coded Thunderbolt port to PCIe down port mapping
971 * per controller.
972 */
973 if (tb_switch_is_cactus_ridge(sw) ||
974 tb_switch_is_alpine_ridge(sw))
99cabbb0 975 index = !phy_port ? 6 : 7;
17a8f815 976 else if (tb_switch_is_falcon_ridge(sw))
99cabbb0 977 index = !phy_port ? 6 : 8;
978 else if (tb_switch_is_titan_ridge(sw))
979 index = !phy_port ? 8 : 9;
980 else
981 goto out;
982
983 /* Validate the hard-coding */
984 if (WARN_ON(index > sw->config.max_port_number))
985 goto out;
986
987 down = &sw->ports[index];
988 }
989
990 if (down) {
991 if (WARN_ON(!tb_port_is_pcie_down(down)))
99cabbb0 992 goto out;
9cac51a0 993 if (tb_pci_port_is_enabled(down))
994 goto out;
995
b0407983 996 return down;
99cabbb0 997 }
3364f0c1 998
99cabbb0 999out:
e78db6f0 1000 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
99cabbb0 1001}
3364f0c1 1002
1003static void
1004tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1005{
1006 struct tb_tunnel *first_tunnel;
1007 struct tb *tb = group->tb;
1008 struct tb_port *in;
1009 int ret;
1010
1011 tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1012 group->index);
1013
1014 first_tunnel = NULL;
1015 list_for_each_entry(in, &group->ports, group_list) {
1016 int estimated_bw, estimated_up, estimated_down;
1017 struct tb_tunnel *tunnel;
1018 struct tb_port *out;
1019
1020 if (!usb4_dp_port_bw_mode_enabled(in))
1021 continue;
1022
1023 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1024 if (WARN_ON(!tunnel))
1025 break;
1026
1027 if (!first_tunnel) {
1028 /*
1029 * Since USB3 bandwidth is shared by all DP
1030 * tunnels under the host router USB4 port, even
1031 * if they do not begin from the host router, we
1032 * can release USB3 bandwidth just once and not
1033 * for each tunnel separately.
1034 */
1035 first_tunnel = tunnel;
1036 ret = tb_release_unused_usb3_bandwidth(tb,
1037 first_tunnel->src_port, first_tunnel->dst_port);
1038 if (ret) {
1039 tb_port_warn(in,
1040 "failed to release unused bandwidth\n");
1041 break;
1042 }
1043 }
1044
1045 out = tunnel->dst_port;
1046 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1047 &estimated_down);
1048 if (ret) {
1049 tb_port_warn(in,
1050 "failed to re-calculate estimated bandwidth\n");
1051 break;
1052 }
1053
1054 /*
1055 * Estimated bandwidth includes:
1056 * - already allocated bandwidth for the DP tunnel
1057 * - available bandwidth along the path
1058 * - bandwidth allocated for USB 3.x but not used.
1059 */
1060 tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
1061 estimated_up, estimated_down);
1062
1063 if (in->sw->config.depth < out->sw->config.depth)
1064 estimated_bw = estimated_down;
1065 else
1066 estimated_bw = estimated_up;
1067
1068 if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
1069 tb_port_warn(in, "failed to update estimated bandwidth\n");
1070 }
1071
1072 if (first_tunnel)
1073 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1074 first_tunnel->dst_port);
1075
1076 tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1077}
1078
1079static void tb_recalc_estimated_bandwidth(struct tb *tb)
1080{
1081 struct tb_cm *tcm = tb_priv(tb);
1082 int i;
1083
1084 tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1085
1086 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1087 struct tb_bandwidth_group *group = &tcm->groups[i];
1088
1089 if (!list_empty(&group->ports))
1090 tb_recalc_estimated_bandwidth_for_group(group);
1091 }
1092
1093 tb_dbg(tb, "bandwidth re-calculation done\n");
1094}
1095
1096static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1097{
1098 struct tb_port *host_port, *port;
1099 struct tb_cm *tcm = tb_priv(tb);
1100
1101 host_port = tb_route(in->sw) ?
1102 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1103
1104 list_for_each_entry(port, &tcm->dp_resources, list) {
1105 if (!tb_port_is_dpout(port))
1106 continue;
1107
1108 if (tb_port_is_enabled(port)) {
b0ef48fc 1109 tb_port_dbg(port, "DP OUT in use\n");
e876f34a
MW
1110 continue;
1111 }
1112
1113 tb_port_dbg(port, "DP OUT available\n");
1114
1115 /*
1116 * Keep the DP tunnel under the topology starting from
1117 * the same host router downstream port.
1118 */
1119 if (host_port && tb_route(port->sw)) {
1120 struct tb_port *p;
1121
1122 p = tb_port_at(tb_route(port->sw), tb->root_switch);
1123 if (p != host_port)
1124 continue;
1125 }
1126
1127 return port;
1128 }
1129
1130 return NULL;
1131}
1132
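/*
 * Picks a free DP IN/DP OUT adapter pair, attaches the DP IN adapter
 * to a bandwidth group, frees unused USB3 bandwidth and establishes a
 * DP tunnel using whatever bandwidth is still available on the path.
 */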
8afe909b 1133static void tb_tunnel_dp(struct tb *tb)
4f807e47 1134{
9d2d0a5c 1135 int available_up, available_down, ret, link_nr;
4f807e47 1136 struct tb_cm *tcm = tb_priv(tb);
8afe909b 1137 struct tb_port *port, *in, *out;
4f807e47 1138 struct tb_tunnel *tunnel;
4f807e47 1139
1140 if (!tb_acpi_may_tunnel_dp()) {
1141 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1142 return;
1143 }
1144
1145 /*
1146 * Find pair of inactive DP IN and DP OUT adapters and then
1147 * establish a DP tunnel between them.
1148 */
1149 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1150
1151 in = NULL;
1152 out = NULL;
1153 list_for_each_entry(port, &tcm->dp_resources, list) {
1154 if (!tb_port_is_dpin(port))
1155 continue;
1156
8afe909b 1157 if (tb_port_is_enabled(port)) {
b0ef48fc 1158 tb_port_dbg(port, "DP IN in use\n");
1159 continue;
1160 }
4f807e47 1161
e876f34a 1162 tb_port_dbg(port, "DP IN available\n");
8afe909b 1163
1164 out = tb_find_dp_out(tb, port);
1165 if (out) {
8afe909b 1166 in = port;
1167 break;
1168 }
1169 }
1170
1171 if (!in) {
1172 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1173 return;
1174 }
1175 if (!out) {
1176 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
1177 return;
1178 }
1179
1180 /*
1181 * This is only applicable to links that are not bonded (so
1182 * when Thunderbolt 1 hardware is involved somewhere in the
1183 * topology). For these try to share the DP bandwidth between
1184 * the two lanes.
1185 */
1186 link_nr = 1;
1187 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1188 if (tb_tunnel_is_dp(tunnel)) {
1189 link_nr = 0;
1190 break;
1191 }
1192 }
1193
1194 /*
1195 * DP stream needs the domain to be active so runtime resume
1196 * both ends of the tunnel.
1197 *
1198 * This should bring the routers in the middle active as well
1199 * and keeps the domain from runtime suspending while the DP
1200 * tunnel is active.
1201 */
1202 pm_runtime_get_sync(&in->sw->dev);
1203 pm_runtime_get_sync(&out->sw->dev);
1204
1205 if (tb_switch_alloc_dp_resource(in->sw, in)) {
1206 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
6ac6faee 1207 goto err_rpm_put;
8afe909b 1208 }
4f807e47 1209
1210 if (!tb_attach_bandwidth_group(tcm, in, out))
1211 goto err_dealloc_dp;
1212
1213 /* Make all unused USB3 bandwidth available for the new DP tunnel */
1214 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1215 if (ret) {
1216 tb_warn(tb, "failed to release unused bandwidth\n");
6ce35635 1217 goto err_detach_group;
1218 }
1219
6ce35635 1220 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
0bd680cd 1221 if (ret)
6ce35635 1222 goto err_reclaim_usb;
1223
1224 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1225 available_up, available_down);
a11b88ad 1226
1227 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1228 available_down);
4f807e47 1229 if (!tunnel) {
8afe909b 1230 tb_port_dbg(out, "could not allocate DP tunnel\n");
6ce35635 1231 goto err_reclaim_usb;
1232 }
1233
1234 if (tb_tunnel_activate(tunnel)) {
1235 tb_port_info(out, "DP tunnel activation failed, aborting\n");
0bd680cd 1236 goto err_free;
1237 }
1238
1239 list_add_tail(&tunnel->list, &tcm->tunnel_list);
0bd680cd 1240 tb_reclaim_usb3_bandwidth(tb, in, out);
1241
1242 /* Update the domain with the new bandwidth estimation */
1243 tb_recalc_estimated_bandwidth(tb);
1244
1245 /*
 1246 * If a DP tunnel exists, change the TMU mode of the host router's
 1247 * first-level children to HiFi so that CL0s works.
1248 */
1249 tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);
1250
1251 return;
1252
1253err_free:
1254 tb_tunnel_free(tunnel);
6ce35635 1255err_reclaim_usb:
0bd680cd 1256 tb_reclaim_usb3_bandwidth(tb, in, out);
1257err_detach_group:
1258 tb_detach_bandwidth_group(in);
0bd680cd 1259err_dealloc_dp:
8afe909b 1260 tb_switch_dealloc_dp_resource(in->sw, in);
1261err_rpm_put:
1262 pm_runtime_mark_last_busy(&out->sw->dev);
1263 pm_runtime_put_autosuspend(&out->sw->dev);
1264 pm_runtime_mark_last_busy(&in->sw->dev);
1265 pm_runtime_put_autosuspend(&in->sw->dev);
1266}
1267
8afe909b 1268static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
4f807e47 1269{
1270 struct tb_port *in, *out;
1271 struct tb_tunnel *tunnel;
1272
1273 if (tb_port_is_dpin(port)) {
1274 tb_port_dbg(port, "DP IN resource unavailable\n");
1275 in = port;
1276 out = NULL;
1277 } else {
1278 tb_port_dbg(port, "DP OUT resource unavailable\n");
1279 in = NULL;
1280 out = port;
1281 }
1282
1283 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1284 tb_deactivate_and_free_tunnel(tunnel);
1285 list_del_init(&port->list);
1286
1287 /*
 1288 * See if there is another DP OUT port that can be used
 1289 * to create another tunnel.
1290 */
6ce35635 1291 tb_recalc_estimated_bandwidth(tb);
1292 tb_tunnel_dp(tb);
1293}
1294
1295static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1296{
1297 struct tb_cm *tcm = tb_priv(tb);
1298 struct tb_port *p;
1299
1300 if (tb_port_is_enabled(port))
1301 return;
1302
1303 list_for_each_entry(p, &tcm->dp_resources, list) {
1304 if (p == port)
1305 return;
1306 }
1307
1308 tb_port_dbg(port, "DP %s resource available\n",
1309 tb_port_is_dpin(port) ? "IN" : "OUT");
1310 list_add_tail(&port->list, &tcm->dp_resources);
1311
1312 /* Look for suitable DP IN <-> DP OUT pairs now */
1313 tb_tunnel_dp(tb);
1314}
1315
1316static void tb_disconnect_and_release_dp(struct tb *tb)
1317{
1318 struct tb_cm *tcm = tb_priv(tb);
1319 struct tb_tunnel *tunnel, *n;
1320
1321 /*
1322 * Tear down all DP tunnels and release their resources. They
1323 * will be re-established after resume based on plug events.
1324 */
1325 list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1326 if (tb_tunnel_is_dp(tunnel))
1327 tb_deactivate_and_free_tunnel(tunnel);
1328 }
1329
1330 while (!list_empty(&tcm->dp_resources)) {
1331 struct tb_port *port;
1332
1333 port = list_first_entry(&tcm->dp_resources,
1334 struct tb_port, list);
1335 list_del_init(&port->list);
1336 }
1337}
1338
1339static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1340{
1341 struct tb_tunnel *tunnel;
1342 struct tb_port *up;
1343
1344 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1345 if (WARN_ON(!up))
1346 return -ENODEV;
1347
1348 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1349 if (WARN_ON(!tunnel))
1350 return -ENODEV;
1351
1352 tb_switch_xhci_disconnect(sw);
1353
1354 tb_tunnel_deactivate(tunnel);
1355 list_del(&tunnel->list);
1356 tb_tunnel_free(tunnel);
1357 return 0;
1358}
1359
1360static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1361{
1362 struct tb_port *up, *down, *port;
1363 struct tb_cm *tcm = tb_priv(tb);
1364 struct tb_switch *parent_sw;
1365 struct tb_tunnel *tunnel;
3364f0c1 1366
386e5e29 1367 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1368 if (!up)
1369 return 0;
3364f0c1 1370
1371 /*
1372 * Look up available down port. Since we are chaining it should
1373 * be found right above this switch.
1374 */
1375 parent_sw = tb_to_switch(sw->dev.parent);
1376 port = tb_port_at(tb_route(sw), parent_sw);
1377 down = tb_find_pcie_down(parent_sw, port);
1378 if (!down)
1379 return 0;
1380
1381 tunnel = tb_tunnel_alloc_pci(tb, up, down);
1382 if (!tunnel)
1383 return -ENOMEM;
1384
1385 if (tb_tunnel_activate(tunnel)) {
1386 tb_port_info(up,
1387 "PCIe tunnel activation failed, aborting\n");
1388 tb_tunnel_free(tunnel);
1389 return -EIO;
3364f0c1 1390 }
99cabbb0 1391
1392 /*
1393 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
1394 * here.
1395 */
1396 if (tb_switch_pcie_l1_enable(sw))
1397 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1398
1399 if (tb_switch_xhci_connect(sw))
1400 tb_sw_warn(sw, "failed to connect xHCI\n");
1401
1402 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1403 return 0;
3364f0c1 1404}
9da672a4 1405
1406static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1407 int transmit_path, int transmit_ring,
1408 int receive_path, int receive_ring)
1409{
1410 struct tb_cm *tcm = tb_priv(tb);
1411 struct tb_port *nhi_port, *dst_port;
1412 struct tb_tunnel *tunnel;
1413 struct tb_switch *sw;
1414
1415 sw = tb_to_switch(xd->dev.parent);
1416 dst_port = tb_port_at(xd->route, sw);
386e5e29 1417 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1418
1419 mutex_lock(&tb->lock);
1420 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1421 transmit_ring, receive_path, receive_ring);
1422 if (!tunnel) {
1423 mutex_unlock(&tb->lock);
1424 return -ENOMEM;
1425 }
1426
1427 if (tb_tunnel_activate(tunnel)) {
1428 tb_port_info(nhi_port,
1429 "DMA tunnel activation failed, aborting\n");
1430 tb_tunnel_free(tunnel);
1431 mutex_unlock(&tb->lock);
1432 return -EIO;
1433 }
1434
1435 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1436 mutex_unlock(&tb->lock);
1437 return 0;
1438}
1439
1440static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1441 int transmit_path, int transmit_ring,
1442 int receive_path, int receive_ring)
7ea4cd6b 1443{
1444 struct tb_cm *tcm = tb_priv(tb);
1445 struct tb_port *nhi_port, *dst_port;
1446 struct tb_tunnel *tunnel, *n;
1447 struct tb_switch *sw;
1448
1449 sw = tb_to_switch(xd->dev.parent);
1450 dst_port = tb_port_at(xd->route, sw);
180b0689 1451 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
7ea4cd6b 1452
1453 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1454 if (!tb_tunnel_is_dma(tunnel))
1455 continue;
1456 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1457 continue;
1458
1459 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1460 receive_path, receive_ring))
1461 tb_deactivate_and_free_tunnel(tunnel);
1462 }
1463}
1464
1465static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1466 int transmit_path, int transmit_ring,
1467 int receive_path, int receive_ring)
1468{
1469 if (!xd->is_unplugged) {
1470 mutex_lock(&tb->lock);
1471 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1472 transmit_ring, receive_path,
1473 receive_ring);
1474 mutex_unlock(&tb->lock);
1475 }
1476 return 0;
1477}
1478
1479/* hotplug handling */
1480
877e50b3 1481/*
1482 * tb_handle_hotplug() - handle hotplug event
1483 *
1484 * Executes on tb->wq.
1485 */
1486static void tb_handle_hotplug(struct work_struct *work)
1487{
1488 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1489 struct tb *tb = ev->tb;
9d3cce0b 1490 struct tb_cm *tcm = tb_priv(tb);
1491 struct tb_switch *sw;
1492 struct tb_port *port;
284652a4 1493
1494 /* Bring the domain back from sleep if it was suspended */
1495 pm_runtime_get_sync(&tb->dev);
1496
d6cc51cd 1497 mutex_lock(&tb->lock);
9d3cce0b 1498 if (!tcm->hotplug_active)
1499 goto out; /* during init, suspend or shutdown */
1500
8f965efd 1501 sw = tb_switch_find_by_route(tb, ev->route);
1502 if (!sw) {
1503 tb_warn(tb,
1504 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
1505 ev->route, ev->port, ev->unplug);
1506 goto out;
1507 }
1508 if (ev->port > sw->config.max_port_number) {
1509 tb_warn(tb,
1510 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
1511 ev->route, ev->port, ev->unplug);
8f965efd 1512 goto put_sw;
1513 }
1514 port = &sw->ports[ev->port];
1515 if (tb_is_upstream_port(port)) {
1516 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1517 ev->route, ev->port, ev->unplug);
8f965efd 1518 goto put_sw;
053596d9 1519 }
1520
1521 pm_runtime_get_sync(&sw->dev);
1522
053596d9 1523 if (ev->unplug) {
1524 tb_retimer_remove_all(port);
1525
dfe40ca4 1526 if (tb_port_has_remote(port)) {
7ea4cd6b 1527 tb_port_dbg(port, "switch unplugged\n");
aae20bb6 1528 tb_sw_set_unplugged(port->remote->sw);
3364f0c1 1529 tb_free_invalid_tunnels(tb);
8afe909b 1530 tb_remove_dp_resources(port->remote->sw);
cf29b9af 1531 tb_switch_tmu_disable(port->remote->sw);
de462039 1532 tb_switch_unconfigure_link(port->remote->sw);
91c0c120 1533 tb_switch_lane_bonding_disable(port->remote->sw);
bfe778ac 1534 tb_switch_remove(port->remote->sw);
053596d9 1535 port->remote = NULL;
1536 if (port->dual_link_port)
1537 port->dual_link_port->remote = NULL;
8afe909b 1538 /* Maybe we can create another DP tunnel */
6ce35635 1539 tb_recalc_estimated_bandwidth(tb);
8afe909b 1540 tb_tunnel_dp(tb);
1541 } else if (port->xdomain) {
1542 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1543
1544 tb_port_dbg(port, "xdomain unplugged\n");
1545 /*
1546 * Service drivers are unbound during
1547 * tb_xdomain_remove() so setting XDomain as
1548 * unplugged here prevents deadlock if they call
1549 * tb_xdomain_disable_paths(). We will tear down
180b0689 1550 * all the tunnels below.
1551 */
1552 xd->is_unplugged = true;
1553 tb_xdomain_remove(xd);
1554 port->xdomain = NULL;
180b0689 1555 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
7ea4cd6b 1556 tb_xdomain_put(xd);
284652a4 1557 tb_port_unconfigure_xdomain(port);
1558 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1559 tb_dp_resource_unavailable(tb, port);
1560 } else if (!port->port) {
1561 tb_sw_dbg(sw, "xHCI disconnect request\n");
1562 tb_switch_xhci_disconnect(sw);
053596d9 1563 } else {
1564 tb_port_dbg(port,
1565 "got unplug event for disconnected port, ignoring\n");
1566 }
1567 } else if (port->remote) {
62efe699 1568 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1569 } else if (!port->port && sw->authorized) {
1570 tb_sw_dbg(sw, "xHCI connect request\n");
1571 tb_switch_xhci_connect(sw);
053596d9 1572 } else {
344e0643 1573 if (tb_port_is_null(port)) {
62efe699 1574 tb_port_dbg(port, "hotplug: scanning\n");
1575 tb_scan_port(port);
1576 if (!port->remote)
62efe699 1577 tb_port_dbg(port, "hotplug: no switch found\n");
1578 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1579 tb_dp_resource_available(tb, port);
344e0643 1580 }
053596d9 1581 }
8f965efd 1582
1583 pm_runtime_mark_last_busy(&sw->dev);
1584 pm_runtime_put_autosuspend(&sw->dev);
1585
1586put_sw:
1587 tb_switch_put(sw);
1588out:
1589 mutex_unlock(&tb->lock);
1590
1591 pm_runtime_mark_last_busy(&tb->dev);
1592 pm_runtime_put_autosuspend(&tb->dev);
1593
1594 kfree(ev);
1595}
1596
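/*
 * Handles a bandwidth (re)allocation request from a DP IN adapter: the
 * request is clamped against the maximum the DPRX negotiated and is
 * granted either from the tunnel's current reservation or from
 * bandwidth freed up by USB3 and still available on the path.
 */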
1597static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
1598 int *requested_down)
1599{
1600 int allocated_up, allocated_down, available_up, available_down, ret;
1601 int requested_up_corrected, requested_down_corrected, granularity;
1602 int max_up, max_down, max_up_rounded, max_down_rounded;
1603 struct tb *tb = tunnel->tb;
1604 struct tb_port *in, *out;
1605
1606 ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
1607 if (ret)
1608 return ret;
1609
1610 in = tunnel->src_port;
1611 out = tunnel->dst_port;
1612
1613 tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
1614 allocated_up, allocated_down);
1615
1616 /*
 1617 * If we get a rounded-up request from the graphics side, say HBR2 x 4
1618 * that is 17500 instead of 17280 (this is because of the
1619 * granularity), we allow it too. Here the graphics has already
1620 * negotiated with the DPRX the maximum possible rates (which is
1621 * 17280 in this case).
1622 *
1623 * Since the link cannot go higher than 17280 we use that in our
1624 * calculations but the DP IN adapter Allocated BW write must be
1625 * the same value (17500) otherwise the adapter will mark it as
1626 * failed for graphics.
1627 */
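	/*
	 * As a worked example (assuming the adapter reports a 250 Mb/s
	 * granularity): HBR2 x 4 is 4 x 5400 Mb/s x 8/10 = 17280 Mb/s
	 * and roundup(17280, 250) = 17500, which is the value the
	 * graphics side asks for.
	 */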
1628 ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
1629 if (ret)
1630 return ret;
1631
1632 ret = usb4_dp_port_granularity(in);
1633 if (ret < 0)
1634 return ret;
1635 granularity = ret;
1636
1637 max_up_rounded = roundup(max_up, granularity);
1638 max_down_rounded = roundup(max_down, granularity);
1639
1640 /*
1641 * This will "fix" the request down to the maximum supported
1642 * rate * lanes if it is at the maximum rounded up level.
1643 */
1644 requested_up_corrected = *requested_up;
1645 if (requested_up_corrected == max_up_rounded)
1646 requested_up_corrected = max_up;
1647 else if (requested_up_corrected < 0)
1648 requested_up_corrected = 0;
1649 requested_down_corrected = *requested_down;
1650 if (requested_down_corrected == max_down_rounded)
1651 requested_down_corrected = max_down;
1652 else if (requested_down_corrected < 0)
1653 requested_down_corrected = 0;
1654
1655 tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
1656 requested_up_corrected, requested_down_corrected);
1657
1658 if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
1659 (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
1660 tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
1661 requested_up_corrected, requested_down_corrected,
1662 max_up_rounded, max_down_rounded);
1663 return -ENOBUFS;
1664 }
1665
1666 if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
1667 (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
1668 /*
 1669 * If the requested bandwidth is less than or equal to what is
1670 * currently allocated to that tunnel we simply change
1671 * the reservation of the tunnel. Since all the tunnels
1672 * going out from the same USB4 port are in the same
1673 * group the released bandwidth will be taken into
1674 * account for the other tunnels automatically below.
1675 */
1676 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
1677 requested_down);
1678 }
1679
1680 /*
1681 * More bandwidth is requested. Release all the potential
1682 * bandwidth from USB3 first.
1683 */
1684 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1685 if (ret)
1686 return ret;
1687
1688 /*
1689 * Then go over all tunnels that cross the same USB4 ports (they
1690 * are also in the same group but we use the same function here
1691 * that we use with the normal bandwidth allocation).
1692 */
1693 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
1694 if (ret)
1695 goto reclaim;
1696
1697 tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
1698 available_up, available_down);
1699
1700 if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
1701 (*requested_down >= 0 && available_down >= requested_down_corrected)) {
1702 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
1703 requested_down);
1704 } else {
1705 ret = -ENOBUFS;
1706 }
1707
1708reclaim:
1709 tb_reclaim_usb3_bandwidth(tb, in, out);
1710 return ret;
1711}
1712
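/*
 * Deferred handler for a DP_BW notification: reads the bandwidth the
 * DP IN adapter is requesting, maps it to the correct direction based
 * on the tunnel orientation and tries to allocate it.
 */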
1713static void tb_handle_dp_bandwidth_request(struct work_struct *work)
1714{
1715 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1716 int requested_bw, requested_up, requested_down, ret;
1717 struct tb_port *in, *out;
1718 struct tb_tunnel *tunnel;
1719 struct tb *tb = ev->tb;
1720 struct tb_cm *tcm = tb_priv(tb);
1721 struct tb_switch *sw;
1722
1723 pm_runtime_get_sync(&tb->dev);
1724
1725 mutex_lock(&tb->lock);
1726 if (!tcm->hotplug_active)
1727 goto unlock;
1728
1729 sw = tb_switch_find_by_route(tb, ev->route);
1730 if (!sw) {
1731 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
1732 ev->route);
1733 goto unlock;
1734 }
1735
1736 in = &sw->ports[ev->port];
1737 if (!tb_port_is_dpin(in)) {
1738 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
1739 goto unlock;
1740 }
1741
1742 tb_port_dbg(in, "handling bandwidth allocation request\n");
1743
1744 if (!usb4_dp_port_bw_mode_enabled(in)) {
1745 tb_port_warn(in, "bandwidth allocation mode not enabled\n");
1746 goto unlock;
1747 }
1748
1749 ret = usb4_dp_port_requested_bw(in);
1750 if (ret < 0) {
1751 if (ret == -ENODATA)
1752 tb_port_dbg(in, "no bandwidth request active\n");
1753 else
1754 tb_port_warn(in, "failed to read requested bandwidth\n");
1755 goto unlock;
1756 }
ace75e18 1757 requested_bw = ret;
1758
1759 tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
1760
1761 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1762 if (!tunnel) {
1763 tb_port_warn(in, "failed to find tunnel\n");
1764 goto unlock;
1765 }
1766
1767 out = tunnel->dst_port;
1768
1769 if (in->sw->config.depth < out->sw->config.depth) {
1770 requested_up = -1;
1771 requested_down = requested_bw;
1772 } else {
1773 requested_up = requested_bw;
1774 requested_down = -1;
1775 }
1776
1777 ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
1778 if (ret) {
1779 if (ret == -ENOBUFS)
1780 tb_port_warn(in, "not enough bandwidth available\n");
1781 else
1782 tb_port_warn(in, "failed to change bandwidth allocation\n");
1783 } else {
1784 tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
1785 requested_up, requested_down);
1786
1787 /* Update other clients about the allocation change */
1788 tb_recalc_estimated_bandwidth(tb);
1789 }
1790
1791unlock:
1792 mutex_unlock(&tb->lock);
1793
1794 pm_runtime_mark_last_busy(&tb->dev);
1795 pm_runtime_put_autosuspend(&tb->dev);
1796}
1797
1798static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
1799{
1800 struct tb_hotplug_event *ev;
1801
1802 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
1803 if (!ev)
1804 return;
1805
1806 ev->tb = tb;
1807 ev->route = route;
1808 ev->port = port;
1809 INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
1810 queue_work(tb->wq, &ev->work);
1811}
1812
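/*
 * Acks the incoming notification and queues deferred handling for the
 * ones we care about (currently only DP bandwidth allocation requests).
 */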
1813static void tb_handle_notification(struct tb *tb, u64 route,
1814 const struct cfg_error_pkg *error)
1815{
1816 if (tb_cfg_ack_notification(tb->ctl, route, error))
1817 tb_warn(tb, "could not ack notification on %llx\n", route);
1818
1819 switch (error->error) {
1820 case TB_CFG_ERROR_DP_BW:
1821 tb_queue_dp_bandwidth_request(tb, route, error->port);
1822 break;
1823
1824 default:
1825 /* Ack is enough */
1826 return;
1827 }
1828}
1829
877e50b3 1830/*
1831 * tb_schedule_hotplug_handler() - callback function for the control channel
1832 *
1833 * Delegates to tb_handle_hotplug.
1834 */
1835static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1836 const void *buf, size_t size)
d6cc51cd 1837{
81a54b5e 1838 const struct cfg_event_pkg *pkg = buf;
6ce35635 1839 u64 route = tb_cfg_get_route(&pkg->header);
81a54b5e 1840
1841 switch (type) {
1842 case TB_CFG_PKG_ERROR:
1843 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
1844 return;
1845 case TB_CFG_PKG_EVENT:
1846 break;
1847 default:
1848 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
1849 return;
1850 }
1851
210e9f56 1852 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
1853 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
1854 pkg->port);
1855 }
1856
4f807e47 1857 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1858}
1859
9d3cce0b 1860static void tb_stop(struct tb *tb)
d6cc51cd 1861{
9d3cce0b 1862 struct tb_cm *tcm = tb_priv(tb);
1863 struct tb_tunnel *tunnel;
1864 struct tb_tunnel *n;
3364f0c1 1865
6ac6faee 1866 cancel_delayed_work(&tcm->remove_work);
3364f0c1 1867 /* tunnels are only present after everything has been initialized */
1868 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1869 /*
1870 * DMA tunnels require the driver to be functional so we
1871 * tear them down. Other protocol tunnels can be left
1872 * intact.
1873 */
1874 if (tb_tunnel_is_dma(tunnel))
1875 tb_tunnel_deactivate(tunnel);
93f36ade 1876 tb_tunnel_free(tunnel);
7ea4cd6b 1877 }
bfe778ac 1878 tb_switch_remove(tb->root_switch);
9d3cce0b 1879 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1880}
1881
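/*
 * Runs for each child of the root switch once the initial scan is done:
 * lifts uevent suppression and emits KOBJ_ADD recursively so userspace
 * only sees fully set up switches.
 */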
1882static int tb_scan_finalize_switch(struct device *dev, void *data)
1883{
1884 if (tb_is_switch(dev)) {
1885 struct tb_switch *sw = tb_to_switch(dev);
1886
1887 /*
1888 * If we found that the switch was already set up by the
1889 * boot firmware, mark it as authorized now before we
1890 * send the uevent to userspace.
1891 */
1892 if (sw->boot)
1893 sw->authorized = 1;
1894
1895 dev_set_uevent_suppress(dev, false);
1896 kobject_uevent(&dev->kobj, KOBJ_ADD);
1897 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
1898 }
1899
1900 return 0;
1901}
1902
9d3cce0b 1903static int tb_start(struct tb *tb)
d6cc51cd 1904{
9d3cce0b 1905 struct tb_cm *tcm = tb_priv(tb);
bfe778ac 1906 int ret;
d6cc51cd 1907
bfe778ac 1908 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1909 if (IS_ERR(tb->root_switch))
1910 return PTR_ERR(tb->root_switch);
a25c8b2f 1911
1912 /*
1913 * ICM firmware upgrade needs the ICM firmware to be running, which
1914 * is not the case in native (software connection manager) mode, so
1915 * disable firmware upgrade of the root switch.
1916 *
1917 * However, USB4 routers support NVM firmware upgrade if they
1918 * implement the necessary router operations.
e6b245cc 1919 */
5172eb9a 1920 tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
1921 /* All USB4 routers support runtime PM */
1922 tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
e6b245cc 1923
1924 ret = tb_switch_configure(tb->root_switch);
1925 if (ret) {
1926 tb_switch_put(tb->root_switch);
1927 return ret;
1928 }
1929
1930 /* Announce the switch to the world */
1931 ret = tb_switch_add(tb->root_switch);
1932 if (ret) {
1933 tb_switch_put(tb->root_switch);
1934 return ret;
1935 }
1936
1937 /*
1938 * To support the highest CLx state, we set the host router's TMU to
1939 * Normal mode.
1940 */
1941 tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
1942 false);
1943 /* Enable TMU if it is off */
1944 tb_switch_tmu_enable(tb->root_switch);
1945 /* Full scan to discover devices added before the driver was loaded. */
1946 tb_scan_switch(tb->root_switch);
0414bec5 1947 /* Discover tunnels created by the boot firmware */
43bddb26 1948 tb_discover_tunnels(tb);
1949 /* Add DP resources from the DP tunnels created by the boot firmware */
1950 tb_discover_dp_resources(tb);
1951 /*
1952 * If the boot firmware did not create USB 3.x tunnels, create them
1953 * now for the whole topology.
1954 */
1955 tb_create_usb3_tunnels(tb->root_switch);
1956 /* Add DP IN resources for the root switch */
1957 tb_add_dp_resources(tb->root_switch);
1958 /* Make the discovered switches available to userspace */
1959 device_for_each_child(&tb->root_switch->dev, NULL,
1960 tb_scan_finalize_switch);
9da672a4 1961
d6cc51cd 1962 /* Allow tb_handle_hotplug to progress events */
1963 tcm->hotplug_active = true;
1964 return 0;
1965}
1966
9d3cce0b 1967static int tb_suspend_noirq(struct tb *tb)
23dd5bb4 1968{
1969 struct tb_cm *tcm = tb_priv(tb);
1970
daa5140f 1971 tb_dbg(tb, "suspending...\n");
81a2e3e4 1972 tb_disconnect_and_release_dp(tb);
6ac6faee 1973 tb_switch_suspend(tb->root_switch, false);
9d3cce0b 1974 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
daa5140f 1975 tb_dbg(tb, "suspend finished\n");
1976
1977 return 0;
1978}
1979
1980static void tb_restore_children(struct tb_switch *sw)
1981{
1982 struct tb_port *port;
990f4b85 1983 int ret;
91c0c120 1984
1985 /* No need to restore if the router is already unplugged */
1986 if (sw->is_unplugged)
1987 return;
1988
a28ec0e1 1989 /*
1990 * CL0s and CL1 are enabled and supported together. If CLx is not
1991 * supported, re-enabling it here is silently ignored.
a28ec0e1 1992 */
1993 ret = tb_switch_enable_clx(sw, TB_CL1);
1994 if (ret && ret != -EOPNOTSUPP)
1995 tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
1996 tb_switch_clx_name(TB_CL1));
1997
1998 if (tb_switch_is_clx_enabled(sw, TB_CL1))
1999 /*
2000 * To support the highest CLx state, we set the router's TMU to
2001 * Normal-Uni mode.
2002 */
2003 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
2004 else
2005 /* If CLx is disabled, configure the router's TMU to HiFi-Bidir mode */
2006 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
2007
2008 if (tb_enable_tmu(sw))
2009 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2010
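	/*
	 * Walk every downstream port: re-enable lane bonding and link
	 * configuration for connected routers (recursing into them) and
	 * reconfigure any XDomain connections.
	 */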
91c0c120 2011 tb_switch_for_each_port(sw, port) {
284652a4 2012 if (!tb_port_has_remote(port) && !port->xdomain)
2013 continue;
2014
2015 if (port->remote) {
2016 tb_switch_lane_bonding_enable(port->remote->sw);
2017 tb_switch_configure_link(port->remote->sw);
91c0c120 2018
2019 tb_restore_children(port->remote->sw);
2020 } else if (port->xdomain) {
f9cad07b 2021 tb_port_configure_xdomain(port, port->xdomain);
284652a4 2022 }
2023 }
2024}
2025
9d3cce0b 2026static int tb_resume_noirq(struct tb *tb)
23dd5bb4 2027{
9d3cce0b 2028 struct tb_cm *tcm = tb_priv(tb);
93f36ade 2029 struct tb_tunnel *tunnel, *n;
2030 unsigned int usb3_delay = 0;
2031 LIST_HEAD(tunnels);
9d3cce0b 2032
daa5140f 2033 tb_dbg(tb, "resuming...\n");
2034
2035 /* remove any PCI devices the firmware might have set up */
356b6c4e 2036 tb_switch_reset(tb->root_switch);
2037
2038 tb_switch_resume(tb->root_switch);
2039 tb_free_invalid_tunnels(tb);
2040 tb_free_unplugged_children(tb->root_switch);
91c0c120 2041 tb_restore_children(tb->root_switch);
2042
2043 /*
2044 * If we get here from suspend to disk, the boot firmware or the
2045 * restore kernel might have created tunnels of their own. Since
2046 * we cannot be sure they are usable for us, we find and tear
2047 * them down.
2048 */
2049 tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2050 list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
2051 if (tb_tunnel_is_usb3(tunnel))
2052 usb3_delay = 500;
2053 tb_tunnel_deactivate(tunnel);
2054 tb_tunnel_free(tunnel);
2055 }
2056
2057 /* Re-create our tunnels now */
2058 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2059 /* USB3 requires a delay before it can be re-activated */
2060 if (tb_tunnel_is_usb3(tunnel)) {
2061 msleep(usb3_delay);
2062 /* Only need to do it once */
2063 usb3_delay = 0;
2064 }
93f36ade 2065 tb_tunnel_restart(tunnel);
43bddb26 2066 }
9d3cce0b 2067 if (!list_empty(&tcm->tunnel_list)) {
2068 /*
2069 * The PCIe links need some time to come up before they are
2070 * usable; 100 ms has been sufficient in practice.
2071 */
daa5140f 2072 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
2073 msleep(100);
2074 }
2075 /* Allow tb_handle_hotplug to progress events */
9d3cce0b 2076 tcm->hotplug_active = true;
daa5140f 2077 tb_dbg(tb, "resume finished\n");
2078
2079 return 0;
2080}
2081
2082static int tb_free_unplugged_xdomains(struct tb_switch *sw)
2083{
2084 struct tb_port *port;
2085 int ret = 0;
7ea4cd6b 2086
b433d010 2087 tb_switch_for_each_port(sw, port) {
2088 if (tb_is_upstream_port(port))
2089 continue;
2090 if (port->xdomain && port->xdomain->is_unplugged) {
dacb1287 2091 tb_retimer_remove_all(port);
7ea4cd6b 2092 tb_xdomain_remove(port->xdomain);
284652a4 2093 tb_port_unconfigure_xdomain(port);
2094 port->xdomain = NULL;
2095 ret++;
2096 } else if (port->remote) {
2097 ret += tb_free_unplugged_xdomains(port->remote->sw);
2098 }
2099 }
2100
2101 return ret;
2102}
2103
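/*
 * For freeze/thaw (hibernation) only hotplug event handling is stopped
 * and restarted; tunnels and router configuration are left untouched.
 */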
2104static int tb_freeze_noirq(struct tb *tb)
2105{
2106 struct tb_cm *tcm = tb_priv(tb);
2107
2108 tcm->hotplug_active = false;
2109 return 0;
2110}
2111
2112static int tb_thaw_noirq(struct tb *tb)
2113{
2114 struct tb_cm *tcm = tb_priv(tb);
2115
2116 tcm->hotplug_active = true;
2117 return 0;
2118}
2119
2120static void tb_complete(struct tb *tb)
2121{
2122 /*
2123 * Release any unplugged XDomains. If another domain has been
2124 * swapped in place of an unplugged XDomain, run another rescan
2125 * to pick it up.
2126 */
2127 mutex_lock(&tb->lock);
2128 if (tb_free_unplugged_xdomains(tb->root_switch))
2129 tb_scan_switch(tb->root_switch);
2130 mutex_unlock(&tb->lock);
2131}
2132
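/*
 * Runtime PM: when the domain goes idle the routers are suspended and
 * hotplug handling is paused; both are restored in tb_runtime_resume().
 */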
2133static int tb_runtime_suspend(struct tb *tb)
2134{
2135 struct tb_cm *tcm = tb_priv(tb);
2136
2137 mutex_lock(&tb->lock);
2138 tb_switch_suspend(tb->root_switch, true);
2139 tcm->hotplug_active = false;
2140 mutex_unlock(&tb->lock);
2141
2142 return 0;
2143}
2144
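/*
 * Deferred cleanup of routers and XDomains found unplugged after
 * runtime resume (scheduled from tb_runtime_resume() below).
 */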
2145static void tb_remove_work(struct work_struct *work)
2146{
2147 struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
2148 struct tb *tb = tcm_to_tb(tcm);
2149
2150 mutex_lock(&tb->lock);
2151 if (tb->root_switch) {
2152 tb_free_unplugged_children(tb->root_switch);
2153 tb_free_unplugged_xdomains(tb->root_switch);
2154 }
2155 mutex_unlock(&tb->lock);
2156}
2157
2158static int tb_runtime_resume(struct tb *tb)
2159{
2160 struct tb_cm *tcm = tb_priv(tb);
2161 struct tb_tunnel *tunnel, *n;
2162
2163 mutex_lock(&tb->lock);
2164 tb_switch_resume(tb->root_switch);
2165 tb_free_invalid_tunnels(tb);
2166 tb_restore_children(tb->root_switch);
2167 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
2168 tb_tunnel_restart(tunnel);
2169 tcm->hotplug_active = true;
2170 mutex_unlock(&tb->lock);
2171
2172 /*
2173 * Schedule cleanup of any unplugged devices. Run this in a
2174 * separate thread to avoid possible deadlock if the device
2175 * removal runtime resumes the unplugged device.
2176 */
2177 queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
2178 return 0;
2179}
2180
2181static const struct tb_cm_ops tb_cm_ops = {
2182 .start = tb_start,
2183 .stop = tb_stop,
2184 .suspend_noirq = tb_suspend_noirq,
2185 .resume_noirq = tb_resume_noirq,
2186 .freeze_noirq = tb_freeze_noirq,
2187 .thaw_noirq = tb_thaw_noirq,
7ea4cd6b 2188 .complete = tb_complete,
2189 .runtime_suspend = tb_runtime_suspend,
2190 .runtime_resume = tb_runtime_resume,
81a54b5e 2191 .handle_event = tb_handle_event,
3da88be2 2192 .disapprove_switch = tb_disconnect_pci,
99cabbb0 2193 .approve_switch = tb_tunnel_pci,
2194 .approve_xdomain_paths = tb_approve_xdomain_paths,
2195 .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
2196};
2197
2198/*
2199 * During suspend the Thunderbolt controller is reset and all PCIe
2200 * tunnels are lost. The NHI driver will try to reestablish all tunnels
2201 * during resume. This adds device links between the tunneled PCIe
2202 * downstream ports and the NHI so that the device core makes sure
2203 * the NHI is resumed before the rest.
2204 */
2205static void tb_apple_add_links(struct tb_nhi *nhi)
2206{
2207 struct pci_dev *upstream, *pdev;
2208
2209 if (!x86_apple_machine)
2210 return;
2211
2212 switch (nhi->pdev->device) {
2213 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2214 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2215 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2216 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2217 break;
2218 default:
2219 return;
2220 }
2221
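	/* Walk up from the NHI until a PCIe upstream port is found */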
2222 upstream = pci_upstream_bridge(nhi->pdev);
2223 while (upstream) {
2224 if (!pci_is_pcie(upstream))
2225 return;
2226 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2227 break;
2228 upstream = pci_upstream_bridge(upstream);
2229 }
2230
2231 if (!upstream)
2232 return;
2233
2234 /*
2235 * For each hotplug-capable downstream port, add a device link back
2236 * to the NHI so that PCIe tunnels can be re-established after
2237 * sleep.
2238 */
2239 for_each_pci_bridge(pdev, upstream->subordinate) {
2240 const struct device_link *link;
2241
2242 if (!pci_is_pcie(pdev))
2243 continue;
2244 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2245 !pdev->is_hotplug_bridge)
2246 continue;
2247
2248 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2249 DL_FLAG_AUTOREMOVE_SUPPLIER |
2250 DL_FLAG_PM_RUNTIME);
2251 if (link) {
2252 dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2253 dev_name(&pdev->dev));
2254 } else {
2255 dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2256 dev_name(&pdev->dev));
2257 }
2258 }
2259}
2260
2261struct tb *tb_probe(struct tb_nhi *nhi)
2262{
2263 struct tb_cm *tcm;
2264 struct tb *tb;
2265
7f0a34d7 2266 tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
2267 if (!tb)
2268 return NULL;
2269
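	/*
	 * If the platform (ACPI) does not allow PCIe tunneling at all,
	 * run with the "nopcie" security level; otherwise PCIe tunnels
	 * require user approval.
	 */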
2270 if (tb_acpi_may_tunnel_pcie())
2271 tb->security_level = TB_SECURITY_USER;
2272 else
2273 tb->security_level = TB_SECURITY_NOPCIE;
2274
2275 tb->cm_ops = &tb_cm_ops;
2276
2277 tcm = tb_priv(tb);
2278 INIT_LIST_HEAD(&tcm->tunnel_list);
8afe909b 2279 INIT_LIST_HEAD(&tcm->dp_resources);
6ac6faee 2280 INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
6ce35635 2281 tb_init_bandwidth_groups(tcm);
9d3cce0b 2282
2283 tb_dbg(tb, "using software connection manager\n");
2284
2285 tb_apple_add_links(nhi);
2286 tb_acpi_add_links(nhi);
2287
9d3cce0b 2288 return tb;
23dd5bb4 2289}