// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100	/* ms */
#define MAX_GROUPS	7	/* max Group_ID is 7 */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop processing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 * @groups: Bandwidth groups used in this domain.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];
};

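/*
 * The connection manager private data is allocated immediately after
 * struct tb (see tb_priv()), so the containing domain structure can be
 * recovered with simple pointer arithmetic.
 */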
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

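/*
 * Bandwidth group IDs are 1-based on the wire (max Group_ID is 7),
 * hence the index below is offset by one.
 */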
static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		group->tb = tcm_to_tb(tcm);
		group->index = i + 1;
		INIT_LIST_HEAD(&group->ports);
	}
}

static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
					   struct tb_port *in)
{
	if (!group || WARN_ON(in->group))
		return;

	in->group = group;
	list_add_tail(&in->group_list, &group->ports);

	tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}

static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (list_empty(&group->ports))
			return group;
	}

	return NULL;
}

static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
			  struct tb_port *out)
{
	struct tb_bandwidth_group *group;
	struct tb_tunnel *tunnel;

	/*
	 * Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always set up tunnels the same way we
	 * can just check for the routers at both ends of the tunnels
	 * and if they are the same we have a match.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			if (group) {
				tb_bandwidth_group_attach_port(group, in);
				return group;
			}
		}
	}

	/* Otherwise pick up the next available group */
	group = tb_find_free_bandwidth_group(tcm);
	if (group)
		tb_bandwidth_group_attach_port(group, in);
	else
		tb_port_warn(in, "no available bandwidth groups\n");

	return group;
}

static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
					struct tb_port *out)
{
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		int index, i;

		index = usb4_dp_port_group_id(in);
		for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
			if (tcm->groups[i].index == index) {
				tb_bandwidth_group_attach_port(&tcm->groups[i], in);
				return;
			}
		}
	}

	tb_attach_bandwidth_group(tcm, in, out);
}

static void tb_detach_bandwidth_group(struct tb_port *in)
{
	struct tb_bandwidth_group *group = in->group;

	if (group) {
		in->group = NULL;
		list_del_init(&in->group_list);

		tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
	}
}

static void tb_handle_hotplug(struct work_struct *work);

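/*
 * Hotplug events are copied into a heap-allocated work item and
 * deferred to the domain workqueue, where tb_handle_hotplug() runs in
 * process context under tb->lock.
 */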
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

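/*
 * When tunnels set up by the boot firmware are discovered, their DP
 * adapters are recorded on the resource list here unless they are
 * tracked already.
 */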
static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

/* Enables CL states up to the host router */
static int tb_enable_clx(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	unsigned int clx = TB_CL0S | TB_CL1;
	const struct tb_tunnel *tunnel;
	int ret;

	/*
	 * Currently only enable CLx for the first link. This is enough
	 * to allow the CPU to save energy at least on Intel hardware
	 * and makes it slightly simpler to implement. We may change
	 * this in the future to cover the whole topology if it turns
	 * out to be beneficial.
	 */
	while (sw && sw->config.depth > 1)
		sw = tb_switch_parent(sw);

	if (!sw)
		return 0;

	if (sw->config.depth != 1)
		return 0;

	/*
	 * If we are re-enabling then check if there is an active DMA
	 * tunnel and in that case bail out.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dma(tunnel)) {
			if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
				return 0;
		}
	}

	/*
	 * Initially try with CL2. If that's not supported by the
	 * topology try with CL0s and CL1 and then give up.
	 */
	ret = tb_switch_clx_enable(sw, clx | TB_CL2);
	if (ret == -EOPNOTSUPP)
		ret = tb_switch_clx_enable(sw, clx);
	return ret == -EOPNOTSUPP ? 0 : ret;
}

/* Disables CL states up to the host router */
static void tb_disable_clx(struct tb_switch *sw)
{
	do {
		if (tb_switch_clx_disable(sw) < 0)
			tb_sw_warn(sw, "failed to disable CL states\n");
		sw = tb_switch_parent(sw);
	} while (sw);
}

static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (!sw)
		return 0;

	if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
		enum tb_switch_tmu_mode mode;
		int ret;

		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		else
			mode = TB_SWITCH_TMU_MODE_HIFI_BI;

		ret = tb_switch_tmu_configure(sw, mode);
		if (ret)
			return ret;

		return tb_switch_tmu_enable(sw);
	}

	return 0;
}

static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
	struct tb_switch *sw;

	if (!tunnel)
		return;

	/*
	 * Once the first DP tunnel is established we change the TMU
	 * accuracy of first depth child routers (and the host router)
	 * to the highest. This is needed for the DP tunneling to work
	 * but also allows CL0s.
	 *
	 * If both routers are v2 then we don't need to do anything as
	 * they are using enhanced TMU mode that allows all CLx.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/*
	 * If both routers at the end of the link are v2 we simply
	 * enable the enhanced uni-directional mode. That covers all
	 * the CL states. For v1 and before we need to use the normal
	 * rate to allow CL1 (when supported). Otherwise we keep the TMU
	 * running at the highest accuracy.
	 */
	ret = tb_switch_tmu_configure(sw,
			TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
	if (ret == -EOPNOTSUPP) {
		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_LOWRES);
		else
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_HIFI_BI);
	}
	if (ret)
		return ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			struct tb_port *in = tunnel->src_port;
			struct tb_port *out = tunnel->dst_port;

			/* Keep the domain from powering down */
			pm_runtime_get_sync(&in->sw->dev);
			pm_runtime_get_sync(&out->sw->dev);

			tb_discover_bandwidth_group(tcm, in, out);
		}
	}
}

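/*
 * XDomain (host-to-host) link configuration goes through the USB4 port
 * operations on USB4 routers and falls back to the link controller on
 * pre-USB4 Thunderbolt hardware.
 */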
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

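/*
 * Computes the bandwidth still available between @src_port and
 * @dst_port as the minimum over all the links on the path, in Mb/s per
 * direction. Per-link totals come from speed (Gb/s) times width times
 * 1000; an asymmetric Gen 4 link uses three lanes in one direction and
 * one in the other, hence the 3x/1x split below. Consumed DP and USB3
 * bandwidth is then subtracted from each link.
 */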
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
	       tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
	       dst_port->port);

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && tunnel->src_port != src_port &&
	    tunnel->dst_port != dst_port) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	/* Maximum possible bandwidth of an asymmetric Gen 4 link is 120 Gb/s */
	*available_up = *available_down = 120000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
			/*
			 * sw->link_width is from upstream perspective
			 * so we use the opposite for downstream of the
			 * host router.
			 */
			if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			} else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * port->sw->link_width * 1000;
				down_bw = up_bw;
			}
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;

			link_width = tb_port_get_link_width(port);
			if (link_width < 0)
				return link_width;

			if (link_width == TB_LINK_WIDTH_ASYM_TX) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			} else {
				up_bw = link_speed * link_width * 1000;
				down_bw = up_bw;
			}
		}

		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw -= down_bw / 10;

		tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
			    down_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (tb_tunnel_is_invalid(tunnel))
				continue;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			/*
			 * Ignore the DP tunnel between src_port and
			 * dst_port because it is the same tunnel and we
			 * may be re-calculating estimated bandwidth.
			 */
			if (tunnel->src_port == src_port &&
			    tunnel->dst_port == dst_port)
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

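/*
 * The two helpers below bracket tunnel (re)allocation: unused USB3
 * bandwidth on the branch is released first so that it can be offered
 * to a new tunnel, and whatever is left over afterwards is given back
 * to the USB3 tunnel.
 */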
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

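/*
 * Walks the topology depth-first and sets up a chained USB3 tunnel for
 * every router below @sw that has a USB3 upstream adapter.
 */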
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm_put;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm_put;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm_put;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment runtime PM is only supported on Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Configuration valid needs to be set after the TMU has been
	 * enabled for the upstream port of the router so we do it here.
	 */
	tb_switch_configuration_valid(sw);

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm_put:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}

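/*
 * Tears a tunnel down and releases whatever it held: for DP that means
 * the bandwidth group, the DP IN resource and the runtime PM references
 * taken on both ends; DP then falls through to the USB3 case so the
 * freed bandwidth is reclaimed for the branch.
 */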
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		tb_detach_bandwidth_group(src_port);
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

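/*
 * Picks the PCIe downstream adapter that pairs with the given port:
 * USB4 routers report the mapping themselves, while legacy Thunderbolt
 * controllers rely on the hard-coded per-controller tables below.
 */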
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

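/*
 * Re-runs the estimation for every DP IN adapter in the group that has
 * the bandwidth allocation mode enabled: USB3 bandwidth is released
 * once for the whole group, each tunnel's estimate is refreshed and
 * written back to the adapter, and the leftover is reclaimed at the
 * end.
 */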
static void
tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
{
	struct tb_tunnel *first_tunnel;
	struct tb *tb = group->tb;
	struct tb_port *in;
	int ret;

	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
	       group->index);

	first_tunnel = NULL;
	list_for_each_entry(in, &group->ports, group_list) {
		int estimated_bw, estimated_up, estimated_down;
		struct tb_tunnel *tunnel;
		struct tb_port *out;

		if (!usb4_dp_port_bandwidth_mode_enabled(in))
			continue;

		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
		if (WARN_ON(!tunnel))
			break;

		if (!first_tunnel) {
			/*
			 * Since USB3 bandwidth is shared by all DP
			 * tunnels under the host router USB4 port, even
			 * if they do not begin from the host router, we
			 * can release USB3 bandwidth just once and not
			 * for each tunnel separately.
			 */
			first_tunnel = tunnel;
			ret = tb_release_unused_usb3_bandwidth(tb,
				first_tunnel->src_port, first_tunnel->dst_port);
			if (ret) {
				tb_port_warn(in,
					"failed to release unused bandwidth\n");
				break;
			}
		}

		out = tunnel->dst_port;
		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
					     &estimated_down);
		if (ret) {
			tb_port_warn(in,
				"failed to re-calculate estimated bandwidth\n");
			break;
		}

		/*
		 * Estimated bandwidth includes:
		 * - already allocated bandwidth for the DP tunnel
		 * - available bandwidth along the path
		 * - bandwidth allocated for USB 3.x but not used.
		 */
		tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
			    estimated_up, estimated_down);

		if (in->sw->config.depth < out->sw->config.depth)
			estimated_bw = estimated_down;
		else
			estimated_bw = estimated_up;

		if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
			tb_port_warn(in, "failed to update estimated bandwidth\n");
	}

	if (first_tunnel)
		tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
					  first_tunnel->dst_port);

	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
}

static void tb_recalc_estimated_bandwidth(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int i;

	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (!list_empty(&group->ports))
			tb_recalc_estimated_bandwidth_for_group(group);
	}

	tb_dbg(tb, "bandwidth re-calculation done\n");
}

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "DP OUT in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

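/*
 * Establishes a DP tunnel between the first free DP IN <-> DP OUT pair
 * found on the resource list. Both ends are runtime-resumed for the
 * lifetime of the tunnel, and all spare bandwidth on the path is
 * offered to it before the USB3 leftovers are reclaimed.
 */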
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret, link_nr;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "DP IN in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * This is only applicable to links that are not bonded (so
	 * when Thunderbolt 1 hardware is involved somewhere in the
	 * topology). For these try to share the DP bandwidth between
	 * the two lanes.
	 */
	link_nr = 1;
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel)) {
			link_nr = 0;
			break;
		}
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	if (!tb_attach_bandwidth_group(tcm, in, out))
		goto err_dealloc_dp;

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_detach_group;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
	if (ret)
		goto err_reclaim_usb;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
				    available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim_usb;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);

	/* Update the domain with the new bandwidth estimation */
	tb_recalc_estimated_bandwidth(tb);

	/*
	 * If a DP tunnel exists, change the TMU mode of the host
	 * router's first depth children to HiFi for CL0s to work.
	 */
	tb_increase_tmu_accuracy(tunnel);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim_usb:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_detach_group:
	tb_detach_bandwidth_group(in);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_recalc_estimated_bandwidth(tb);
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_switch_xhci_disconnect(sw);

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}

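/*
 * Sets up a PCIe tunnel for a newly plugged router. The downstream
 * adapter is chained right above the router; once the tunnel is up,
 * PCIe L1 is enabled where needed for CL0s and the on-board xHCI
 * controller is connected.
 */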
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_pcie_down(tb_switch_parent(sw), port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/*
	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
	 * here.
	 */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");

	if (tb_switch_xhci_connect(sw))
		tb_sw_warn(sw, "failed to connect xHCI\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

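/*
 * DMA (XDomain) paths are set up between the host NHI adapter and the
 * peer domain. CL states are disabled on the link first because they
 * must not be active while DMA paths are in use; they are re-enabled
 * when the last such tunnel is torn down.
 */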
180b0689 MW |
1555 | static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
1556 | int transmit_path, int transmit_ring, | |
1557 | int receive_path, int receive_ring) | |
7ea4cd6b MW |
1558 | { |
1559 | struct tb_cm *tcm = tb_priv(tb); | |
1560 | struct tb_port *nhi_port, *dst_port; | |
1561 | struct tb_tunnel *tunnel; | |
1562 | struct tb_switch *sw; | |
53ba2e16 | 1563 | int ret; |
7ea4cd6b MW |
1564 | |
1565 | sw = tb_to_switch(xd->dev.parent); | |
1566 | dst_port = tb_port_at(xd->route, sw); | |
386e5e29 | 1567 | nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); |
7ea4cd6b MW |
1568 | |
1569 | mutex_lock(&tb->lock); | |
53ba2e16 MW |
1570 | |
1571 | /* | |
1572 | * When tunneling DMA paths the link should not enter CL states | |
1573 | * so disable them now. | |
1574 | */ | |
1575 | tb_disable_clx(sw); | |
1576 | ||
180b0689 MW |
1577 | tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path, |
1578 | transmit_ring, receive_path, receive_ring); | |
7ea4cd6b | 1579 | if (!tunnel) { |
53ba2e16 MW |
1580 | ret = -ENOMEM; |
1581 | goto err_clx; | |
7ea4cd6b MW |
1582 | } |
1583 | ||
1584 | if (tb_tunnel_activate(tunnel)) { | |
1585 | tb_port_info(nhi_port, | |
1586 | "DMA tunnel activation failed, aborting\n"); | |
53ba2e16 MW |
1587 | ret = -EIO; |
1588 | goto err_free; | |
7ea4cd6b MW |
1589 | } |
1590 | ||
1591 | list_add_tail(&tunnel->list, &tcm->tunnel_list); | |
1592 | mutex_unlock(&tb->lock); | |
1593 | return 0; | |
53ba2e16 MW |
1594 | |
1595 | err_free: | |
1596 | tb_tunnel_free(tunnel); | |
1597 | err_clx: | |
1598 | tb_enable_clx(sw); | |
1599 | mutex_unlock(&tb->lock); | |
1600 | ||
1601 | return ret; | |
7ea4cd6b MW |
1602 | } |
1603 | ||
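/*
 * Editor's sketch: tb_approve_xdomain_paths() above is reached through
 * tb_xdomain_enable_paths(), which XDomain service drivers (the
 * Thunderbolt networking driver, for example) call once the rings and
 * paths have been negotiated with the remote host. The numbers below
 * are illustrative only; tb_xdomain_disable_paths() is the counterpart
 * that lands in tb_disconnect_xdomain_paths() below.
 */
static int example_enable_dma(struct tb_xdomain *xd)
{
	/* transmit path, transmit ring, receive path, receive ring */
	return tb_xdomain_enable_paths(xd, 1, 1, 1, 1);
}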
180b0689 MW |
1604 | static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
1605 | int transmit_path, int transmit_ring, | |
1606 | int receive_path, int receive_ring) | |
7ea4cd6b | 1607 | { |
180b0689 MW |
1608 | struct tb_cm *tcm = tb_priv(tb); |
1609 | struct tb_port *nhi_port, *dst_port; | |
1610 | struct tb_tunnel *tunnel, *n; | |
7ea4cd6b MW |
1611 | struct tb_switch *sw; |
1612 | ||
1613 | sw = tb_to_switch(xd->dev.parent); | |
1614 | dst_port = tb_port_at(xd->route, sw); | |
180b0689 | 1615 | nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); |
7ea4cd6b | 1616 | |
180b0689 MW |
1617 | list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { |
1618 | if (!tb_tunnel_is_dma(tunnel)) | |
1619 | continue; | |
1620 | if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port) | |
1621 | continue; | |
1622 | ||
1623 | if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring, | |
1624 | receive_path, receive_ring)) | |
1625 | tb_deactivate_and_free_tunnel(tunnel); | |
1626 | } | |
53ba2e16 MW |
1627 | |
1628 | /* | |
1629 | * Try to re-enable CL states now, it is OK if this fails | |
1630 | * because we may still have another DMA tunnel active through | |
1631 | * the same host router USB4 downstream port. | |
1632 | */ | |
1633 | tb_enable_clx(sw); | |
7ea4cd6b MW |
1634 | } |
1635 | ||
180b0689 MW |
1636 | static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, |
1637 | int transmit_path, int transmit_ring, | |
1638 | int receive_path, int receive_ring) | |
7ea4cd6b MW |
1639 | { |
1640 | if (!xd->is_unplugged) { | |
1641 | mutex_lock(&tb->lock); | |
180b0689 MW |
1642 | __tb_disconnect_xdomain_paths(tb, xd, transmit_path, |
1643 | transmit_ring, receive_path, | |
1644 | receive_ring); | |
7ea4cd6b MW |
1645 | mutex_unlock(&tb->lock); |
1646 | } | |
1647 | return 0; | |
1648 | } | |
1649 | ||
d6cc51cd AN |
1650 | /* hotplug handling */ |
1651 | ||
877e50b3 | 1652 | /* |
d6cc51cd AN |
1653 | * tb_handle_hotplug() - handle hotplug event |
1654 | * | |
1655 | * Executes on tb->wq. | |
1656 | */ | |
1657 | static void tb_handle_hotplug(struct work_struct *work) | |
1658 | { | |
1659 | struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); | |
1660 | struct tb *tb = ev->tb; | |
9d3cce0b | 1661 | struct tb_cm *tcm = tb_priv(tb); |
053596d9 AN |
1662 | struct tb_switch *sw; |
1663 | struct tb_port *port; | |
284652a4 | 1664 | |
6ac6faee MW |
1665 | /* Bring the domain back from sleep if it was suspended */ |
1666 | pm_runtime_get_sync(&tb->dev); | |
1667 | ||
d6cc51cd | 1668 | mutex_lock(&tb->lock); |
9d3cce0b | 1669 | if (!tcm->hotplug_active) |
d6cc51cd AN |
1670 | goto out; /* during init, suspend or shutdown */ |
1671 | ||
8f965efd | 1672 | sw = tb_switch_find_by_route(tb, ev->route); |
053596d9 AN |
1673 | if (!sw) { |
1674 | tb_warn(tb, | |
1675 | "hotplug event from non existent switch %llx:%x (unplug: %d)\n", | |
1676 | ev->route, ev->port, ev->unplug); | |
1677 | goto out; | |
1678 | } | |
1679 | if (ev->port > sw->config.max_port_number) { | |
1680 | tb_warn(tb, | |
1681 | "hotplug event from non existent port %llx:%x (unplug: %d)\n", | |
1682 | ev->route, ev->port, ev->unplug); | |
8f965efd | 1683 | goto put_sw; |
053596d9 AN |
1684 | } |
1685 | port = &sw->ports[ev->port]; | |
1686 | if (tb_is_upstream_port(port)) { | |
dfe40ca4 MW |
1687 | tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n", |
1688 | ev->route, ev->port, ev->unplug); | |
8f965efd | 1689 | goto put_sw; |
053596d9 | 1690 | } |
6ac6faee MW |
1691 | |
1692 | pm_runtime_get_sync(&sw->dev); | |
1693 | ||
053596d9 | 1694 | if (ev->unplug) { |
dacb1287 KK |
1695 | tb_retimer_remove_all(port); |
1696 | ||
dfe40ca4 | 1697 | if (tb_port_has_remote(port)) { |
7ea4cd6b | 1698 | tb_port_dbg(port, "switch unplugged\n"); |
aae20bb6 | 1699 | tb_sw_set_unplugged(port->remote->sw); |
3364f0c1 | 1700 | tb_free_invalid_tunnels(tb); |
8afe909b | 1701 | tb_remove_dp_resources(port->remote->sw); |
cf29b9af | 1702 | tb_switch_tmu_disable(port->remote->sw); |
de462039 | 1703 | tb_switch_unconfigure_link(port->remote->sw); |
91c0c120 | 1704 | tb_switch_lane_bonding_disable(port->remote->sw); |
bfe778ac | 1705 | tb_switch_remove(port->remote->sw); |
053596d9 | 1706 | port->remote = NULL; |
dfe40ca4 MW |
1707 | if (port->dual_link_port) |
1708 | port->dual_link_port->remote = NULL; | |
8afe909b | 1709 | /* Maybe we can create another DP tunnel */ |
6ce35635 | 1710 | tb_recalc_estimated_bandwidth(tb); |
8afe909b | 1711 | tb_tunnel_dp(tb); |
7ea4cd6b MW |
1712 | } else if (port->xdomain) { |
1713 | struct tb_xdomain *xd = tb_xdomain_get(port->xdomain); | |
1714 | ||
1715 | tb_port_dbg(port, "xdomain unplugged\n"); | |
1716 | /* | |
1717 | * Service drivers are unbound during | |
1718 | * tb_xdomain_remove() so setting XDomain as | |
1719 | * unplugged here prevents deadlock if they call | |
1720 | * tb_xdomain_disable_paths(). We will tear down | |
180b0689 | 1721 | * all the tunnels below. |
7ea4cd6b MW |
1722 | */ |
1723 | xd->is_unplugged = true; | |
1724 | tb_xdomain_remove(xd); | |
1725 | port->xdomain = NULL; | |
180b0689 | 1726 | __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1); |
7ea4cd6b | 1727 | tb_xdomain_put(xd); |
284652a4 | 1728 | tb_port_unconfigure_xdomain(port); |
8afe909b MW |
1729 | } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { |
1730 | tb_dp_resource_unavailable(tb, port); | |
30a4eca6 MW |
1731 | } else if (!port->port) { |
1732 | tb_sw_dbg(sw, "xHCI disconnect request\n"); | |
1733 | tb_switch_xhci_disconnect(sw); | |
053596d9 | 1734 | } else { |
62efe699 MW |
1735 | tb_port_dbg(port, |
1736 | "got unplug event for disconnected port, ignoring\n"); | |
053596d9 AN |
1737 | } |
1738 | } else if (port->remote) { | |
62efe699 | 1739 | tb_port_dbg(port, "got plug event for connected port, ignoring\n"); |
30a4eca6 MW |
1740 | } else if (!port->port && sw->authorized) { |
1741 | tb_sw_dbg(sw, "xHCI connect request\n"); | |
1742 | tb_switch_xhci_connect(sw); | |
053596d9 | 1743 | } else { |
344e0643 | 1744 | if (tb_port_is_null(port)) { |
62efe699 | 1745 | tb_port_dbg(port, "hotplug: scanning\n"); |
344e0643 MW |
1746 | tb_scan_port(port); |
1747 | if (!port->remote) | |
62efe699 | 1748 | tb_port_dbg(port, "hotplug: no switch found\n"); |
8afe909b MW |
1749 | } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { |
1750 | tb_dp_resource_available(tb, port); | |
344e0643 | 1751 | } |
053596d9 | 1752 | } |
8f965efd | 1753 | |
6ac6faee MW |
1754 | pm_runtime_mark_last_busy(&sw->dev); |
1755 | pm_runtime_put_autosuspend(&sw->dev); | |
1756 | ||
8f965efd MW |
1757 | put_sw: |
1758 | tb_switch_put(sw); | |
d6cc51cd AN |
1759 | out: |
1760 | mutex_unlock(&tb->lock); | |
6ac6faee MW |
1761 | |
1762 | pm_runtime_mark_last_busy(&tb->dev); | |
1763 | pm_runtime_put_autosuspend(&tb->dev); | |
1764 | ||
d6cc51cd AN |
1765 | kfree(ev); |
1766 | } | |
1767 | ||
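/*
 * Editor's note: the handler above is queued by tb_queue_hotplug()
 * (defined earlier in this file), which follows the same pattern as
 * tb_queue_dp_bandwidth_request() below: allocate a struct
 * tb_hotplug_event, fill in route/port/unplug and queue the work on
 * tb->wq. The handler owns the event and frees it when done.
 */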
6ce35635 MW |
1768 | static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, |
1769 | int *requested_down) | |
1770 | { | |
1771 | int allocated_up, allocated_down, available_up, available_down, ret; | |
1772 | int requested_up_corrected, requested_down_corrected, granularity; | |
1773 | int max_up, max_down, max_up_rounded, max_down_rounded; | |
1774 | struct tb *tb = tunnel->tb; | |
1775 | struct tb_port *in, *out; | |
1776 | ||
1777 | ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down); | |
1778 | if (ret) | |
1779 | return ret; | |
1780 | ||
1781 | in = tunnel->src_port; | |
1782 | out = tunnel->dst_port; | |
1783 | ||
1784 | tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n", | |
1785 | allocated_up, allocated_down); | |
1786 | ||
1787 | /* | |
1788 | * If we get a rounded-up request from the graphics side, say |
1789 | * 17500 instead of 17280 for HBR2 x 4 (the difference comes from |
1790 | * the granularity), we allow it too. At this point the graphics |
1791 | * side has already negotiated the maximum possible rates with the |
1792 | * DPRX (17280 in this case). |
1793 | * |
1794 | * Since the link cannot go higher than 17280 we use that in our |
1795 | * calculations, but the DP IN adapter Allocated BW write must |
1796 | * carry the same value (17500); otherwise the adapter marks the |
1797 | * request as failed for the graphics side. |
1798 | */ | |
1799 | ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down); | |
1800 | if (ret) | |
1801 | return ret; | |
1802 | ||
1803 | ret = usb4_dp_port_granularity(in); | |
1804 | if (ret < 0) | |
1805 | return ret; | |
1806 | granularity = ret; | |
1807 | ||
1808 | max_up_rounded = roundup(max_up, granularity); | |
1809 | max_down_rounded = roundup(max_down, granularity); | |
1810 | ||
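/*
 * Editor's worked example of the rounding above (granularity value
 * illustrative): HBR2 x 4 lanes is 5400 Mb/s * 4 * 8/10 = 17280 Mb/s
 * after 8b/10b coding. With a granularity of 250 Mb/s,
 *
 *   max_up_rounded = roundup(17280, 250) = 17500 Mb/s
 *
 * so a graphics request of 17500 passes the checks below and is
 * corrected back to 17280 for the link capacity math.
 */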
1811 | /* | |
1812 | * This will "fix" the request down to the maximum supported | |
1813 | * rate * lanes if it is at the maximum rounded up level. | |
1814 | */ | |
1815 | requested_up_corrected = *requested_up; | |
1816 | if (requested_up_corrected == max_up_rounded) | |
1817 | requested_up_corrected = max_up; | |
1818 | else if (requested_up_corrected < 0) | |
1819 | requested_up_corrected = 0; | |
1820 | requested_down_corrected = *requested_down; | |
1821 | if (requested_down_corrected == max_down_rounded) | |
1822 | requested_down_corrected = max_down; | |
1823 | else if (requested_down_corrected < 0) | |
1824 | requested_down_corrected = 0; | |
1825 | ||
1826 | tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n", | |
1827 | requested_up_corrected, requested_down_corrected); | |
1828 | ||
1829 | if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) || | |
1830 | (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) { | |
1831 | tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n", | |
1832 | requested_up_corrected, requested_down_corrected, | |
1833 | max_up_rounded, max_down_rounded); | |
1834 | return -ENOBUFS; | |
1835 | } | |
1836 | ||
1837 | if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) || | |
1838 | (*requested_down >= 0 && requested_down_corrected <= allocated_down)) { | |
1839 | /* | |
1840 | * If the requested bandwidth is less than or equal to what is |
1841 | * currently allocated to the tunnel, we simply change the |
1842 | * reservation of the tunnel. Since all the tunnels going out |
1843 | * from the same USB4 port are in the same group, the released |
1844 | * bandwidth is automatically taken into account for the other |
1845 | * tunnels below. |
1846 | */ | |
1847 | return tb_tunnel_alloc_bandwidth(tunnel, requested_up, | |
1848 | requested_down); | |
1849 | } | |
1850 | ||
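/*
 * Editor's example with hypothetical numbers: if the tunnel holds
 * 17280 Mb/s (HBR2 x 4) and the corrected request drops to 8640 Mb/s
 * (HBR x 4), only this tunnel's reservation shrinks above; the freed
 * 8640 Mb/s becomes visible to the rest of the group when
 * tb_recalc_estimated_bandwidth() next runs.
 */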
1851 | /* | |
1852 | * More bandwidth is requested. Release all the potential | |
1853 | * bandwidth from USB3 first. | |
1854 | */ | |
1855 | ret = tb_release_unused_usb3_bandwidth(tb, in, out); | |
1856 | if (ret) | |
1857 | return ret; | |
1858 | ||
1859 | /* | |
1860 | * Then go over all tunnels that cross the same USB4 ports (they | |
1861 | * are also in the same group, but we use the same function here |
1862 | * as with the normal bandwidth allocation). |
1863 | */ | |
1864 | ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down); | |
1865 | if (ret) | |
1866 | goto reclaim; | |
1867 | ||
1868 | tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n", | |
1869 | available_up, available_down); | |
1870 | ||
1871 | if ((*requested_up >= 0 && available_up >= requested_up_corrected) || | |
1872 | (*requested_down >= 0 && available_down >= requested_down_corrected)) { | |
1873 | ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up, | |
1874 | requested_down); | |
1875 | } else { | |
1876 | ret = -ENOBUFS; | |
1877 | } | |
1878 | ||
1879 | reclaim: | |
1880 | tb_reclaim_usb3_bandwidth(tb, in, out); | |
1881 | return ret; | |
1882 | } | |
1883 | ||
1884 | static void tb_handle_dp_bandwidth_request(struct work_struct *work) | |
1885 | { | |
1886 | struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); | |
1887 | int requested_bw, requested_up, requested_down, ret; | |
1888 | struct tb_port *in, *out; | |
1889 | struct tb_tunnel *tunnel; | |
1890 | struct tb *tb = ev->tb; | |
1891 | struct tb_cm *tcm = tb_priv(tb); | |
1892 | struct tb_switch *sw; | |
1893 | ||
1894 | pm_runtime_get_sync(&tb->dev); | |
1895 | ||
1896 | mutex_lock(&tb->lock); | |
1897 | if (!tcm->hotplug_active) | |
1898 | goto unlock; | |
1899 | ||
1900 | sw = tb_switch_find_by_route(tb, ev->route); | |
1901 | if (!sw) { | |
1902 | tb_warn(tb, "bandwidth request from non-existent router %llx\n", | |
1903 | ev->route); | |
1904 | goto unlock; | |
1905 | } | |
1906 | ||
1907 | in = &sw->ports[ev->port]; | |
1908 | if (!tb_port_is_dpin(in)) { | |
1909 | tb_port_warn(in, "bandwidth request to non-DP IN adapter\n"); | |
1910 | goto unlock; | |
1911 | } | |
1912 | ||
1913 | tb_port_dbg(in, "handling bandwidth allocation request\n"); | |
1914 | ||
8d73f6b8 | 1915 | if (!usb4_dp_port_bandwidth_mode_enabled(in)) { |
6ce35635 MW |
1916 | tb_port_warn(in, "bandwidth allocation mode not enabled\n"); |
1917 | goto unlock; | |
1918 | } | |
1919 | ||
8d73f6b8 | 1920 | ret = usb4_dp_port_requested_bandwidth(in); |
ace75e18 MW |
1921 | if (ret < 0) { |
1922 | if (ret == -ENODATA) | |
1923 | tb_port_dbg(in, "no bandwidth request active\n"); | |
1924 | else | |
1925 | tb_port_warn(in, "failed to read requested bandwidth\n"); | |
6ce35635 MW |
1926 | goto unlock; |
1927 | } | |
ace75e18 | 1928 | requested_bw = ret; |
6ce35635 MW |
1929 | |
1930 | tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw); | |
1931 | ||
1932 | tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); | |
1933 | if (!tunnel) { | |
1934 | tb_port_warn(in, "failed to find tunnel\n"); | |
1935 | goto unlock; | |
1936 | } | |
1937 | ||
1938 | out = tunnel->dst_port; | |
1939 | ||
1940 | if (in->sw->config.depth < out->sw->config.depth) { | |
1941 | requested_up = -1; | |
1942 | requested_down = requested_bw; | |
1943 | } else { | |
1944 | requested_up = requested_bw; | |
1945 | requested_down = -1; | |
1946 | } | |
1947 | ||
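/*
 * Editor's note: depth counts hops from the host router, so the
 * comparison above picks the direction the allocation consumes. In
 * the common case the DP IN adapter sits on the host (depth 0) and
 * the DP OUT adapter on a deeper device router, so the request maps
 * to requested_down; the mirrored topology is handled symmetrically.
 */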
1948 | ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down); | |
1949 | if (ret) { | |
1950 | if (ret == -ENOBUFS) | |
1951 | tb_port_warn(in, "not enough bandwidth available\n"); | |
1952 | else | |
1953 | tb_port_warn(in, "failed to change bandwidth allocation\n"); | |
1954 | } else { | |
1955 | tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n", | |
1956 | requested_up, requested_down); | |
1957 | ||
1958 | /* Update other clients about the allocation change */ | |
1959 | tb_recalc_estimated_bandwidth(tb); | |
1960 | } | |
1961 | ||
1962 | unlock: | |
1963 | mutex_unlock(&tb->lock); | |
1964 | ||
1965 | pm_runtime_mark_last_busy(&tb->dev); | |
1966 | pm_runtime_put_autosuspend(&tb->dev); | |
kfree(ev); /* editor's fix: free the one-shot event; this version leaked it */
1967 | }
1968 | ||
1969 | static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port) | |
1970 | { | |
1971 | struct tb_hotplug_event *ev; | |
1972 | ||
1973 | ev = kmalloc(sizeof(*ev), GFP_KERNEL); | |
1974 | if (!ev) | |
1975 | return; | |
1976 | ||
1977 | ev->tb = tb; | |
1978 | ev->route = route; | |
1979 | ev->port = port; | |
1980 | INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request); | |
1981 | queue_work(tb->wq, &ev->work); | |
1982 | } | |
1983 | ||
1984 | static void tb_handle_notification(struct tb *tb, u64 route, | |
1985 | const struct cfg_error_pkg *error) | |
1986 | { | |
6ce35635 MW |
1987 | |
1988 | switch (error->error) { | |
235d0194 MW |
1989 | case TB_CFG_ERROR_PCIE_WAKE: |
1990 | case TB_CFG_ERROR_DP_CON_CHANGE: | |
1991 | case TB_CFG_ERROR_DPTX_DISCOVERY: | |
1992 | if (tb_cfg_ack_notification(tb->ctl, route, error)) | |
1993 | tb_warn(tb, "could not ack notification on %llx\n", | |
1994 | route); | |
1995 | break; | |
1996 | ||
6ce35635 | 1997 | case TB_CFG_ERROR_DP_BW: |
235d0194 MW |
1998 | if (tb_cfg_ack_notification(tb->ctl, route, error)) |
1999 | tb_warn(tb, "could not ack notification on %llx\n", | |
2000 | route); | |
6ce35635 MW |
2001 | tb_queue_dp_bandwidth_request(tb, route, error->port); |
2002 | break; | |
2003 | ||
2004 | default: | |
235d0194 MW |
2005 | /* Ignore for now */ |
2006 | break; | |
6ce35635 MW |
2007 | } |
2008 | } | |
2009 | ||
877e50b3 | 2010 | /* |
d6cc51cd AN |
2011 | * tb_handle_event() - callback function for the control channel |
2012 | * |
2013 | * Delegates events to tb_handle_hotplug() and tb_handle_notification(). |
2014 | */ | |
81a54b5e MW |
2015 | static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, |
2016 | const void *buf, size_t size) | |
d6cc51cd | 2017 | { |
81a54b5e | 2018 | const struct cfg_event_pkg *pkg = buf; |
6ce35635 | 2019 | u64 route = tb_cfg_get_route(&pkg->header); |
81a54b5e | 2020 | |
6ce35635 MW |
2021 | switch (type) { |
2022 | case TB_CFG_PKG_ERROR: | |
2023 | tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf); | |
2024 | return; | |
2025 | case TB_CFG_PKG_EVENT: | |
2026 | break; | |
2027 | default: | |
81a54b5e MW |
2028 | tb_warn(tb, "unexpected event %#x, ignoring\n", type); |
2029 | return; | |
2030 | } | |
2031 | ||
210e9f56 | 2032 | if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) { |
81a54b5e MW |
2033 | tb_warn(tb, "could not ack plug event on %llx:%x\n", route, |
2034 | pkg->port); | |
2035 | } | |
2036 | ||
4f807e47 | 2037 | tb_queue_hotplug(tb, route, pkg->port, pkg->unplug); |
d6cc51cd AN |
2038 | } |
2039 | ||
9d3cce0b | 2040 | static void tb_stop(struct tb *tb) |
d6cc51cd | 2041 | { |
9d3cce0b | 2042 | struct tb_cm *tcm = tb_priv(tb); |
93f36ade MW |
2043 | struct tb_tunnel *tunnel; |
2044 | struct tb_tunnel *n; | |
3364f0c1 | 2045 | |
6ac6faee | 2046 | cancel_delayed_work(&tcm->remove_work); |
3364f0c1 | 2047 | /* tunnels are only present after everything has been initialized */ |
7ea4cd6b MW |
2048 | list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { |
2049 | /* | |
2050 | * DMA tunnels require the driver to be functional so we | |
2051 | * tear them down. Other protocol tunnels can be left | |
2052 | * intact. | |
2053 | */ | |
2054 | if (tb_tunnel_is_dma(tunnel)) | |
2055 | tb_tunnel_deactivate(tunnel); | |
93f36ade | 2056 | tb_tunnel_free(tunnel); |
7ea4cd6b | 2057 | } |
bfe778ac | 2058 | tb_switch_remove(tb->root_switch); |
9d3cce0b | 2059 | tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ |
d6cc51cd AN |
2060 | } |
2061 | ||
99cabbb0 MW |
2062 | static int tb_scan_finalize_switch(struct device *dev, void *data) |
2063 | { | |
2064 | if (tb_is_switch(dev)) { | |
2065 | struct tb_switch *sw = tb_to_switch(dev); | |
2066 | ||
2067 | /* | |
2068 | * If we found that the switch was already set up by the |
2069 | * boot firmware, mark it as authorized now before we | |
2070 | * send uevent to userspace. | |
2071 | */ | |
2072 | if (sw->boot) | |
2073 | sw->authorized = 1; | |
2074 | ||
2075 | dev_set_uevent_suppress(dev, false); | |
2076 | kobject_uevent(&dev->kobj, KOBJ_ADD); | |
2077 | device_for_each_child(dev, NULL, tb_scan_finalize_switch); | |
2078 | } | |
2079 | ||
2080 | return 0; | |
2081 | } | |
2082 | ||
9d3cce0b | 2083 | static int tb_start(struct tb *tb) |
d6cc51cd | 2084 | { |
9d3cce0b | 2085 | struct tb_cm *tcm = tb_priv(tb); |
bfe778ac | 2086 | int ret; |
d6cc51cd | 2087 | |
bfe778ac | 2088 | tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); |
444ac384 MW |
2089 | if (IS_ERR(tb->root_switch)) |
2090 | return PTR_ERR(tb->root_switch); | |
a25c8b2f | 2091 | |
e6b245cc MW |
2092 | /* |
2093 | * ICM firmware upgrade needs running firmware and in native |
2094 | * mode that is not available, so disable firmware upgrade of the |
2095 | * root switch. | |
5172eb9a SC |
2096 | * |
2097 | * However, USB4 routers support NVM firmware upgrade if they | |
2098 | * implement the necessary router operations. | |
e6b245cc | 2099 | */ |
5172eb9a | 2100 | tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch); |
6ac6faee MW |
2101 | /* All USB4 routers support runtime PM */ |
2102 | tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch); | |
e6b245cc | 2103 | |
bfe778ac MW |
2104 | ret = tb_switch_configure(tb->root_switch); |
2105 | if (ret) { | |
2106 | tb_switch_put(tb->root_switch); | |
2107 | return ret; | |
2108 | } | |
2109 | ||
2110 | /* Announce the switch to the world */ | |
2111 | ret = tb_switch_add(tb->root_switch); | |
2112 | if (ret) { | |
2113 | tb_switch_put(tb->root_switch); | |
2114 | return ret; | |
2115 | } | |
2116 | ||
b017a46d GF |
2117 | /* |
2118 | * To support the highest CLx state, we set the host router's TMU |
2119 | * to Normal mode. |
2120 | */ | |
d49b4f04 | 2121 | tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES); |
cf29b9af RM |
2122 | /* Enable TMU if it is off */ |
2123 | tb_switch_tmu_enable(tb->root_switch); | |
9da672a4 AN |
2124 | /* Full scan to discover devices added before the driver was loaded. */ |
2125 | tb_scan_switch(tb->root_switch); | |
0414bec5 | 2126 | /* Find out tunnels created by the boot firmware */ |
43bddb26 | 2127 | tb_discover_tunnels(tb); |
b60e31bf SM |
2128 | /* Add DP resources from the DP tunnels created by the boot firmware */ |
2129 | tb_discover_dp_resources(tb); | |
e6f81858 RM |
2130 | /* |
2131 | * If the boot firmware did not create USB 3.x tunnels, create them |
2132 | * now for the whole topology. | |
2133 | */ | |
2134 | tb_create_usb3_tunnels(tb->root_switch); | |
8afe909b MW |
2135 | /* Add DP IN resources for the root switch */ |
2136 | tb_add_dp_resources(tb->root_switch); | |
99cabbb0 MW |
2137 | /* Make the discovered switches available to the userspace */ |
2138 | device_for_each_child(&tb->root_switch->dev, NULL, | |
2139 | tb_scan_finalize_switch); | |
9da672a4 | 2140 | |
d6cc51cd | 2141 | /* Allow tb_handle_hotplug to progress events */ |
9d3cce0b MW |
2142 | tcm->hotplug_active = true; |
2143 | return 0; | |
d6cc51cd AN |
2144 | } |
2145 | ||
9d3cce0b | 2146 | static int tb_suspend_noirq(struct tb *tb) |
23dd5bb4 | 2147 | { |
9d3cce0b MW |
2148 | struct tb_cm *tcm = tb_priv(tb); |
2149 | ||
daa5140f | 2150 | tb_dbg(tb, "suspending...\n"); |
81a2e3e4 | 2151 | tb_disconnect_and_release_dp(tb); |
6ac6faee | 2152 | tb_switch_suspend(tb->root_switch, false); |
9d3cce0b | 2153 | tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ |
daa5140f | 2154 | tb_dbg(tb, "suspend finished\n"); |
9d3cce0b MW |
2155 | |
2156 | return 0; | |
23dd5bb4 AN |
2157 | } |
2158 | ||
91c0c120 MW |
2159 | static void tb_restore_children(struct tb_switch *sw) |
2160 | { | |
2161 | struct tb_port *port; | |
2162 | ||
6ac6faee MW |
2163 | /* No need to restore if the router is already unplugged */ |
2164 | if (sw->is_unplugged) | |
2165 | return; | |
2166 | ||
1a9b6cb8 MW |
2167 | if (tb_enable_clx(sw)) |
2168 | tb_sw_warn(sw, "failed to re-enable CL states\n"); | |
b017a46d | 2169 | |
cf29b9af RM |
2170 | if (tb_enable_tmu(sw)) |
2171 | tb_sw_warn(sw, "failed to restore TMU configuration\n"); | |
2172 | ||
d49b4f04 MW |
2173 | tb_switch_configuration_valid(sw); |
2174 | ||
91c0c120 | 2175 | tb_switch_for_each_port(sw, port) { |
284652a4 | 2176 | if (!tb_port_has_remote(port) && !port->xdomain) |
91c0c120 MW |
2177 | continue; |
2178 | ||
284652a4 MW |
2179 | if (port->remote) { |
2180 | tb_switch_lane_bonding_enable(port->remote->sw); | |
2181 | tb_switch_configure_link(port->remote->sw); | |
91c0c120 | 2182 | |
284652a4 MW |
2183 | tb_restore_children(port->remote->sw); |
2184 | } else if (port->xdomain) { | |
f9cad07b | 2185 | tb_port_configure_xdomain(port, port->xdomain); |
284652a4 | 2186 | } |
91c0c120 MW |
2187 | } |
2188 | } | |
2189 | ||
9d3cce0b | 2190 | static int tb_resume_noirq(struct tb *tb) |
23dd5bb4 | 2191 | { |
9d3cce0b | 2192 | struct tb_cm *tcm = tb_priv(tb); |
93f36ade | 2193 | struct tb_tunnel *tunnel, *n; |
43bddb26 MW |
2194 | unsigned int usb3_delay = 0; |
2195 | LIST_HEAD(tunnels); | |
9d3cce0b | 2196 | |
daa5140f | 2197 | tb_dbg(tb, "resuming...\n"); |
23dd5bb4 AN |
2198 | |
2199 | /* remove any PCI devices the firmware might have set up */ |
356b6c4e | 2200 | tb_switch_reset(tb->root_switch); |
23dd5bb4 AN |
2201 | |
2202 | tb_switch_resume(tb->root_switch); | |
2203 | tb_free_invalid_tunnels(tb); | |
2204 | tb_free_unplugged_children(tb->root_switch); | |
91c0c120 | 2205 | tb_restore_children(tb->root_switch); |
43bddb26 MW |
2206 | |
2207 | /* | |
2208 | * If we get here from suspend to disk, the boot firmware or the |
2209 | * restore kernel might have created tunnels of their own. Since |
2210 | * we cannot be sure they are usable for us, we find and tear |
2211 | * them down. | |
2212 | */ | |
2213 | tb_switch_discover_tunnels(tb->root_switch, &tunnels, false); | |
2214 | list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) { | |
2215 | if (tb_tunnel_is_usb3(tunnel)) | |
2216 | usb3_delay = 500; | |
2217 | tb_tunnel_deactivate(tunnel); | |
2218 | tb_tunnel_free(tunnel); | |
2219 | } | |
2220 | ||
2221 | /* Re-create our tunnels now */ | |
2222 | list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { | |
2223 | /* USB3 requires delay before it can be re-activated */ | |
2224 | if (tb_tunnel_is_usb3(tunnel)) { | |
2225 | msleep(usb3_delay); | |
2226 | /* Only need to do it once */ | |
2227 | usb3_delay = 0; | |
2228 | } | |
93f36ade | 2229 | tb_tunnel_restart(tunnel); |
43bddb26 | 2230 | } |
9d3cce0b | 2231 | if (!list_empty(&tcm->tunnel_list)) { |
23dd5bb4 AN |
2232 | /* |
2233 | * the pcie links need some time to get going. | |
2234 | * 100ms works for me... | |
2235 | */ | |
daa5140f | 2236 | tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n"); |
23dd5bb4 AN |
2237 | msleep(100); |
2238 | } | |
2239 | /* Allow tb_handle_hotplug to progress events */ | |
9d3cce0b | 2240 | tcm->hotplug_active = true; |
daa5140f | 2241 | tb_dbg(tb, "resume finished\n"); |
9d3cce0b MW |
2242 | |
2243 | return 0; | |
2244 | } | |
2245 | ||
7ea4cd6b MW |
2246 | static int tb_free_unplugged_xdomains(struct tb_switch *sw) |
2247 | { | |
b433d010 MW |
2248 | struct tb_port *port; |
2249 | int ret = 0; | |
7ea4cd6b | 2250 | |
b433d010 | 2251 | tb_switch_for_each_port(sw, port) { |
7ea4cd6b MW |
2252 | if (tb_is_upstream_port(port)) |
2253 | continue; | |
2254 | if (port->xdomain && port->xdomain->is_unplugged) { | |
dacb1287 | 2255 | tb_retimer_remove_all(port); |
7ea4cd6b | 2256 | tb_xdomain_remove(port->xdomain); |
284652a4 | 2257 | tb_port_unconfigure_xdomain(port); |
7ea4cd6b MW |
2258 | port->xdomain = NULL; |
2259 | ret++; | |
2260 | } else if (port->remote) { | |
2261 | ret += tb_free_unplugged_xdomains(port->remote->sw); | |
2262 | } | |
2263 | } | |
2264 | ||
2265 | return ret; | |
2266 | } | |
2267 | ||
884e4d57 MW |
2268 | static int tb_freeze_noirq(struct tb *tb) |
2269 | { | |
2270 | struct tb_cm *tcm = tb_priv(tb); | |
2271 | ||
2272 | tcm->hotplug_active = false; | |
2273 | return 0; | |
2274 | } | |
2275 | ||
2276 | static int tb_thaw_noirq(struct tb *tb) | |
2277 | { | |
2278 | struct tb_cm *tcm = tb_priv(tb); | |
2279 | ||
2280 | tcm->hotplug_active = true; | |
2281 | return 0; | |
2282 | } | |
2283 | ||
7ea4cd6b MW |
2284 | static void tb_complete(struct tb *tb) |
2285 | { | |
2286 | /* | |
2287 | * Release any unplugged XDomains. If another domain has been |
2288 | * swapped in place of an unplugged XDomain we |
2289 | * need to run another rescan. | |
2290 | */ | |
2291 | mutex_lock(&tb->lock); | |
2292 | if (tb_free_unplugged_xdomains(tb->root_switch)) | |
2293 | tb_scan_switch(tb->root_switch); | |
2294 | mutex_unlock(&tb->lock); | |
2295 | } | |
2296 | ||
6ac6faee MW |
2297 | static int tb_runtime_suspend(struct tb *tb) |
2298 | { | |
2299 | struct tb_cm *tcm = tb_priv(tb); | |
2300 | ||
2301 | mutex_lock(&tb->lock); | |
2302 | tb_switch_suspend(tb->root_switch, true); | |
2303 | tcm->hotplug_active = false; | |
2304 | mutex_unlock(&tb->lock); | |
2305 | ||
2306 | return 0; | |
2307 | } | |
2308 | ||
2309 | static void tb_remove_work(struct work_struct *work) | |
2310 | { | |
2311 | struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work); | |
2312 | struct tb *tb = tcm_to_tb(tcm); | |
2313 | ||
2314 | mutex_lock(&tb->lock); | |
2315 | if (tb->root_switch) { | |
2316 | tb_free_unplugged_children(tb->root_switch); | |
2317 | tb_free_unplugged_xdomains(tb->root_switch); | |
2318 | } | |
2319 | mutex_unlock(&tb->lock); | |
2320 | } | |
2321 | ||
2322 | static int tb_runtime_resume(struct tb *tb) | |
2323 | { | |
2324 | struct tb_cm *tcm = tb_priv(tb); | |
2325 | struct tb_tunnel *tunnel, *n; | |
2326 | ||
2327 | mutex_lock(&tb->lock); | |
2328 | tb_switch_resume(tb->root_switch); | |
2329 | tb_free_invalid_tunnels(tb); | |
2330 | tb_restore_children(tb->root_switch); | |
2331 | list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) | |
2332 | tb_tunnel_restart(tunnel); | |
2333 | tcm->hotplug_active = true; | |
2334 | mutex_unlock(&tb->lock); | |
2335 | ||
2336 | /* | |
2337 | * Schedule cleanup of any unplugged devices. Run this in a | |
2338 | * separate thread to avoid possible deadlock if the device | |
2339 | * removal runtime resumes the unplugged device. | |
2340 | */ | |
2341 | queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); | |
2342 | return 0; | |
2343 | } | |
2344 | ||
9d3cce0b MW |
2345 | static const struct tb_cm_ops tb_cm_ops = { |
2346 | .start = tb_start, | |
2347 | .stop = tb_stop, | |
2348 | .suspend_noirq = tb_suspend_noirq, | |
2349 | .resume_noirq = tb_resume_noirq, | |
884e4d57 MW |
2350 | .freeze_noirq = tb_freeze_noirq, |
2351 | .thaw_noirq = tb_thaw_noirq, | |
7ea4cd6b | 2352 | .complete = tb_complete, |
6ac6faee MW |
2353 | .runtime_suspend = tb_runtime_suspend, |
2354 | .runtime_resume = tb_runtime_resume, | |
81a54b5e | 2355 | .handle_event = tb_handle_event, |
3da88be2 | 2356 | .disapprove_switch = tb_disconnect_pci, |
99cabbb0 | 2357 | .approve_switch = tb_tunnel_pci, |
7ea4cd6b MW |
2358 | .approve_xdomain_paths = tb_approve_xdomain_paths, |
2359 | .disconnect_xdomain_paths = tb_disconnect_xdomain_paths, | |
9d3cce0b MW |
2360 | }; |
2361 | ||
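/*
 * Editor's sketch: the domain core owns this table and dispatches into
 * it on lifecycle and PM transitions, roughly as below (simplified
 * from domain.c):
 */
static int example_domain_suspend_noirq(struct tb *tb)
{
	if (tb->cm_ops->suspend_noirq)
		return tb->cm_ops->suspend_noirq(tb);
	return 0;
}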
349bfe08 MW |
2362 | /* |
2363 | * During suspend the Thunderbolt controller is reset and all PCIe | |
2364 | * tunnels are lost. The NHI driver will try to reestablish all tunnels | |
2365 | * during resume. This adds device links between the tunneled PCIe | |
2366 | * downstream ports and the NHI so that the device core will make sure | |
2367 | * NHI is resumed first before the rest. | |
2368 | */ | |
2369 | static void tb_apple_add_links(struct tb_nhi *nhi) | |
2370 | { | |
2371 | struct pci_dev *upstream, *pdev; | |
2372 | ||
2373 | if (!x86_apple_machine) | |
2374 | return; | |
2375 | ||
2376 | switch (nhi->pdev->device) { | |
2377 | case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: | |
2378 | case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: | |
2379 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: | |
2380 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: | |
2381 | break; | |
2382 | default: | |
2383 | return; | |
2384 | } | |
2385 | ||
2386 | upstream = pci_upstream_bridge(nhi->pdev); | |
2387 | while (upstream) { | |
2388 | if (!pci_is_pcie(upstream)) | |
2389 | return; | |
2390 | if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM) | |
2391 | break; | |
2392 | upstream = pci_upstream_bridge(upstream); | |
2393 | } | |
2394 | ||
2395 | if (!upstream) | |
2396 | return; | |
2397 | ||
2398 | /* | |
2399 | * For each hotplug-capable downstream port, add a device link |
2400 | * back to the NHI so that PCIe tunnels can be re-established after |
2401 | * sleep. | |
2402 | */ | |
2403 | for_each_pci_bridge(pdev, upstream->subordinate) { | |
2404 | const struct device_link *link; | |
2405 | ||
2406 | if (!pci_is_pcie(pdev)) | |
2407 | continue; | |
2408 | if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM || | |
2409 | !pdev->is_hotplug_bridge) | |
2410 | continue; | |
2411 | ||
2412 | link = device_link_add(&pdev->dev, &nhi->pdev->dev, | |
2413 | DL_FLAG_AUTOREMOVE_SUPPLIER | | |
2414 | DL_FLAG_PM_RUNTIME); | |
2415 | if (link) { | |
2416 | dev_dbg(&nhi->pdev->dev, "created link from %s\n", | |
2417 | dev_name(&pdev->dev)); | |
2418 | } else { | |
2419 | dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", | |
2420 | dev_name(&pdev->dev)); | |
2421 | } | |
2422 | } | |
2423 | } | |
2424 | ||
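/*
 * Editor's note on the flags above: DL_FLAG_AUTOREMOVE_SUPPLIER drops
 * the link automatically when the supplier (the NHI) is unbound, and
 * DL_FLAG_PM_RUNTIME additionally ties the bridges' runtime PM to the
 * NHI, so runtime resuming a tunneled port resumes the NHI first.
 */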
9d3cce0b MW |
2425 | struct tb *tb_probe(struct tb_nhi *nhi) |
2426 | { | |
2427 | struct tb_cm *tcm; | |
2428 | struct tb *tb; | |
2429 | ||
7f0a34d7 | 2430 | tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm)); |
9d3cce0b MW |
2431 | if (!tb) |
2432 | return NULL; | |
2433 | ||
c6da62a2 MW |
2434 | if (tb_acpi_may_tunnel_pcie()) |
2435 | tb->security_level = TB_SECURITY_USER; | |
2436 | else | |
2437 | tb->security_level = TB_SECURITY_NOPCIE; | |
2438 | ||
9d3cce0b MW |
2439 | tb->cm_ops = &tb_cm_ops; |
2440 | ||
2441 | tcm = tb_priv(tb); | |
2442 | INIT_LIST_HEAD(&tcm->tunnel_list); | |
8afe909b | 2443 | INIT_LIST_HEAD(&tcm->dp_resources); |
6ac6faee | 2444 | INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work); |
6ce35635 | 2445 | tb_init_bandwidth_groups(tcm); |
9d3cce0b | 2446 | |
e0258805 MW |
2447 | tb_dbg(tb, "using software connection manager\n"); |
2448 | ||
349bfe08 MW |
2449 | tb_apple_add_links(nhi); |
2450 | tb_acpi_add_links(nhi); | |
2451 | ||
9d3cce0b | 2452 | return tb; |
23dd5bb4 | 2453 | } |
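/*
 * Editor's sketch: the NHI driver picks a connection manager at probe
 * time, preferring the firmware one and falling back to this software
 * implementation (simplified from nhi.c):
 */
static struct tb *example_select_cm(struct tb_nhi *nhi)
{
	struct tb *tb;

	tb = icm_probe(nhi);		/* firmware connection manager */
	if (!tb)
		tb = tb_probe(nhi);	/* software CM implemented here */
	return tb;
}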