// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
};

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

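/*
 * Allocate a hotplug event for the given route/port and queue it to the
 * domain workqueue; tb_handle_hotplug() processes it from there.
 */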
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

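/*
 * Add every DP IN adapter of @sw that reports an available DP resource
 * to the connection manager's dp_resources list.
 */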
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

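/*
 * The port may lead to another Thunderbolt domain: register an XDomain
 * connection for it unless one already exists for the route.
 */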
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_xdomain_add(xd);
	}
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

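/*
 * Pick the USB3 downstream adapter on @sw that is mapped to @port,
 * falling back to any unused USB3 downstream adapter.
 */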
static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down) {
		if (WARN_ON(!tb_port_is_usb3_down(down)))
			goto out;
		if (WARN_ON(tb_usb3_port_is_enabled(down)))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN);
}

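/*
 * Set up a USB3 tunnel from the parent's USB3 downstream adapter to the
 * USB3 upstream adapter of @sw; tunnels are chained one hop at a time.
 */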
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;
	}

	tunnel = tb_tunnel_alloc_usb3(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected, remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_scan_switch(sw);
}

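/*
 * Find an existing tunnel of @type that matches either the given source
 * or destination adapter.
 */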
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	/*
	 * In case of DP tunnel make sure the DP IN resource is deallocated
	 * properly.
	 */
	if (tb_tunnel_is_dp(tunnel)) {
		struct tb_port *in = tunnel->src_port;

		tb_switch_dealloc_dp_resource(in->sw, in);
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

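/*
 * Return the PCIe downstream adapter of @sw to use for tunneling towards
 * @port, falling back to any unused PCIe downstream adapter.
 */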
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (WARN_ON(tb_pci_port_is_enabled(down)))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

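/*
 * Estimate the bandwidth (in Mb/s) available for a new DP tunnel by walking
 * the links from @out towards @in: per switch, take the link bandwidth minus
 * a 10% guard band and the bandwidth already consumed by existing tunnels,
 * and return the minimum over the whole path.
 */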
static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
			   struct tb_port *out)
{
	struct tb_switch *sw = out->sw;
	struct tb_tunnel *tunnel;
	int bw, available_bw = 40000;

	while (sw && sw != in->sw) {
		bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		bw -= bw / 10;

		/*
		 * Check for any active DP tunnels that go through this
		 * switch and reduce their consumed bandwidth from
		 * available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int consumed_bw;

			if (!tb_tunnel_switch_on_path(tunnel, sw))
				continue;

			consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
			if (consumed_bw < 0)
				return consumed_bw;

			bw -= consumed_bw;
		}

		if (bw < available_bw)
			available_bw = bw;

		sw = tb_switch_parent(sw);
	}

	return available_bw;
}

static void tb_tunnel_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;
	int available_bw;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "available\n");

		if (!in && tb_port_is_dpin(port))
			in = port;
		else if (!out && tb_port_is_dpout(port))
			out = port;
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		return;
	}

	/* Calculate available bandwidth between in and out */
	available_bw = tb_available_bw(tcm, in, out);
	if (available_bw < 0) {
		tb_warn(tb, "failed to determine available bandwidth\n");
		goto dealloc_dp;
	}

	tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
	       available_bw);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto dealloc_dp;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		goto dealloc_dp;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return;

dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

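/*
 * The ->approve_switch() hook of this connection manager: tunnel PCIe from
 * the parent's PCIe downstream adapter to the PCIe upstream adapter of @sw.
 */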
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

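/*
 * Establish a DMA tunnel between the host NHI adapter and the link leading
 * to the other domain described by @xd.
 */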
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates hotplug events to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

910 | ||
9d3cce0b | 911 | static void tb_stop(struct tb *tb) |
d6cc51cd | 912 | { |
9d3cce0b | 913 | struct tb_cm *tcm = tb_priv(tb); |
93f36ade MW |
914 | struct tb_tunnel *tunnel; |
915 | struct tb_tunnel *n; | |
3364f0c1 | 916 | |
3364f0c1 | 917 | /* tunnels are only present after everything has been initialized */ |
7ea4cd6b MW |
918 | list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { |
919 | /* | |
920 | * DMA tunnels require the driver to be functional so we | |
921 | * tear them down. Other protocol tunnels can be left | |
922 | * intact. | |
923 | */ | |
924 | if (tb_tunnel_is_dma(tunnel)) | |
925 | tb_tunnel_deactivate(tunnel); | |
93f36ade | 926 | tb_tunnel_free(tunnel); |
7ea4cd6b | 927 | } |
bfe778ac | 928 | tb_switch_remove(tb->root_switch); |
9d3cce0b | 929 | tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ |
d6cc51cd AN |
930 | } |
931 | ||
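/*
 * device_for_each_child() callback: announce a switch discovered during the
 * initial scan to userspace, marking boot firmware configured switches as
 * authorized first.
 */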
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware, and in native
	 * mode that is not available, so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

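/*
 * Re-enable TMU and lane bonding for every switch in the topology after
 * resume.
 */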
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_switch_lane_bonding_enable(port->remote->sw))
			dev_warn(&sw->dev, "failed to restore lane bonding\n");

		tb_restore_children(port->remote->sw);
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have set up */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

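/*
 * Remove XDomain connections that went away while the system was asleep.
 * Returns the number of connections removed.
 */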
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.complete = tb_complete,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_USER;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);

	return tb;
}