thunderbolt: Scan only valid NULL adapter ports in hotplug
[linux-2.6-block.git] / drivers / thunderbolt / tb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/platform_data/x86/apple.h>
13
14 #include "tb.h"
15 #include "tb_regs.h"
16 #include "tunnel.h"
17
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *                  events and exit if this is not set (it needs to
 *                  acquire the lock one more time). Used to drain wq
 *                  after cfg has been paused.
 *
 * One instance is allocated as the domain private data in tb_probe()
 * (via tb_domain_alloc()) and retrieved everywhere else with
 * tb_priv().  @hotplug_active is only flipped in tb_start()/tb_stop()
 * and the suspend/resume hooks, always under tb->lock semantics used
 * by tb_handle_hotplug().
 */
struct tb_cm {
	struct list_head tunnel_list;
	bool hotplug_active;
};
30
31 /* enumeration & hot plug handling */
32
33 static void tb_discover_tunnels(struct tb_switch *sw)
34 {
35         struct tb *tb = sw->tb;
36         struct tb_cm *tcm = tb_priv(tb);
37         struct tb_port *port;
38         int i;
39
40         for (i = 1; i <= sw->config.max_port_number; i++) {
41                 struct tb_tunnel *tunnel = NULL;
42
43                 port = &sw->ports[i];
44                 switch (port->config.type) {
45                 case TB_TYPE_PCIE_DOWN:
46                         tunnel = tb_tunnel_discover_pci(tb, port);
47                         break;
48
49                 default:
50                         break;
51                 }
52
53                 if (tunnel) {
54                         struct tb_switch *parent = tunnel->dst_port->sw;
55
56                         while (parent != tunnel->src_port->sw) {
57                                 parent->boot = true;
58                                 parent = tb_switch_parent(parent);
59                         }
60
61                         list_add_tail(&tunnel->list, &tcm->tunnel_list);
62                 }
63         }
64
65         for (i = 1; i <= sw->config.max_port_number; i++) {
66                 if (tb_port_has_remote(&sw->ports[i]))
67                         tb_discover_tunnels(sw->ports[i].remote->sw);
68         }
69 }
70
71 static void tb_scan_port(struct tb_port *port);
72
73 /**
74  * tb_scan_switch() - scan for and initialize downstream switches
75  */
76 static void tb_scan_switch(struct tb_switch *sw)
77 {
78         int i;
79         for (i = 1; i <= sw->config.max_port_number; i++)
80                 tb_scan_port(&sw->ports[i]);
81 }
82
/**
 * tb_scan_port() - check for and initialize switches below port
 *
 * Bails out early for upstream ports, non-NULL adapters, the secondary
 * link of a dual-link pair, ports that never come up, and ports that
 * already have a remote.  Otherwise allocates, configures and adds a
 * new switch and wires up the remote pointers on both link lanes
 * before recursing into the new switch.
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;
	/* Only NULL (lane) adapters can have a switch below them */
	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	/* <= 0 means the port did not come up (or read failed); skip it */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_WARN(port, "port already has a remote!\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (!sw)
		return;

	/* Drop our reference if configuration fails; switch is unusable */
	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Continue enumeration below the newly added switch */
	tb_scan_switch(sw);
}
141
142 /**
143  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
144  */
145 static void tb_free_invalid_tunnels(struct tb *tb)
146 {
147         struct tb_cm *tcm = tb_priv(tb);
148         struct tb_tunnel *tunnel;
149         struct tb_tunnel *n;
150
151         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
152                 if (tb_tunnel_is_invalid(tunnel)) {
153                         tb_tunnel_deactivate(tunnel);
154                         list_del(&tunnel->list);
155                         tb_tunnel_free(tunnel);
156                 }
157         }
158 }
159
160 /**
161  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
162  */
163 static void tb_free_unplugged_children(struct tb_switch *sw)
164 {
165         int i;
166         for (i = 1; i <= sw->config.max_port_number; i++) {
167                 struct tb_port *port = &sw->ports[i];
168
169                 if (!tb_port_has_remote(port))
170                         continue;
171
172                 if (port->remote->sw->is_unplugged) {
173                         tb_switch_remove(port->remote->sw);
174                         port->remote = NULL;
175                         if (port->dual_link_port)
176                                 port->dual_link_port->remote = NULL;
177                 } else {
178                         tb_free_unplugged_children(port->remote->sw);
179                 }
180         }
181 }
182
183
184 /**
185  * find_pci_up_port() - return the first PCIe up port on @sw or NULL
186  */
187 static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
188 {
189         int i;
190         for (i = 1; i <= sw->config.max_port_number; i++)
191                 if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
192                         return &sw->ports[i];
193         return NULL;
194 }
195
196 /**
197  * find_unused_down_port() - return the first inactive PCIe down port on @sw
198  */
199 static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
200 {
201         int i;
202         int cap;
203         int res;
204         int data;
205         for (i = 1; i <= sw->config.max_port_number; i++) {
206                 if (tb_is_upstream_port(&sw->ports[i]))
207                         continue;
208                 if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
209                         continue;
210                 cap = sw->ports[i].cap_adap;
211                 if (!cap)
212                         continue;
213                 res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
214                 if (res < 0)
215                         continue;
216                 if (data & 0x80000000)
217                         continue;
218                 return &sw->ports[i];
219         }
220         return NULL;
221 }
222
223 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
224                                          const struct tb_port *port)
225 {
226         /*
227          * To keep plugging devices consistently in the same PCIe
228          * hierarchy, do mapping here for root switch downstream PCIe
229          * ports.
230          */
231         if (!tb_route(sw)) {
232                 int phy_port = tb_phy_port_from_link(port->port);
233                 int index;
234
235                 /*
236                  * Hard-coded Thunderbolt port to PCIe down port mapping
237                  * per controller.
238                  */
239                 if (tb_switch_is_cr(sw))
240                         index = !phy_port ? 6 : 7;
241                 else if (tb_switch_is_fr(sw))
242                         index = !phy_port ? 6 : 8;
243                 else
244                         goto out;
245
246                 /* Validate the hard-coding */
247                 if (WARN_ON(index > sw->config.max_port_number))
248                         goto out;
249                 if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
250                         goto out;
251                 if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
252                         goto out;
253
254                 return &sw->ports[index];
255         }
256
257 out:
258         return tb_find_unused_down_port(sw);
259 }
260
261 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
262 {
263         struct tb_port *up, *down, *port;
264         struct tb_cm *tcm = tb_priv(tb);
265         struct tb_switch *parent_sw;
266         struct tb_tunnel *tunnel;
267
268         up = tb_find_pci_up_port(sw);
269         if (!up)
270                 return 0;
271
272         /*
273          * Look up available down port. Since we are chaining it should
274          * be found right above this switch.
275          */
276         parent_sw = tb_to_switch(sw->dev.parent);
277         port = tb_port_at(tb_route(sw), parent_sw);
278         down = tb_find_pcie_down(parent_sw, port);
279         if (!down)
280                 return 0;
281
282         tunnel = tb_tunnel_alloc_pci(tb, up, down);
283         if (!tunnel)
284                 return -ENOMEM;
285
286         if (tb_tunnel_activate(tunnel)) {
287                 tb_port_info(up,
288                              "PCIe tunnel activation failed, aborting\n");
289                 tb_tunnel_free(tunnel);
290                 return -EIO;
291         }
292
293         list_add_tail(&tunnel->list, &tcm->tunnel_list);
294         return 0;
295 }
296
297 /* hotplug handling */
298
/**
 * struct tb_hotplug_event - plug/unplug event queued on tb->wq
 * @work: Work item executed by tb_handle_hotplug()
 * @tb: Domain the event belongs to
 * @route: Route string of the switch the event came from
 * @port: Adapter number the event refers to
 * @unplug: %true for an unplug event, %false for a plug event
 *
 * Allocated in tb_handle_event() and freed at the end of
 * tb_handle_hotplug().
 */
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};
306
/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.  Validates the event (switch exists, port number
 * is in range, not the upstream port), then either tears down the
 * unplugged subtree or scans the port for a newly plugged device.
 * Always frees the event structure.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	/* Takes a reference on the switch; dropped at put_sw */
	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	if (ev->unplug) {
		if (tb_port_has_remote(port)) {
			tb_port_info(port, "unplugged\n");
			/*
			 * Mark the subtree unplugged before freeing the
			 * now-invalid tunnels and removing the switch.
			 */
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			/* Clear the secondary link lane too */
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_port_info(port,
				     "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_info(port,
			     "got plug event for connected port, ignoring\n");
	} else {
		/* Only NULL (lane) adapters can have a device plugged in */
		if (tb_port_is_null(port)) {
			tb_port_info(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_info(port, "hotplug: no switch found\n");
		}
	}

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}
373
374 /**
375  * tb_schedule_hotplug_handler() - callback function for the control channel
376  *
377  * Delegates to tb_handle_hotplug.
378  */
379 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
380                             const void *buf, size_t size)
381 {
382         const struct cfg_event_pkg *pkg = buf;
383         struct tb_hotplug_event *ev;
384         u64 route;
385
386         if (type != TB_CFG_PKG_EVENT) {
387                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
388                 return;
389         }
390
391         route = tb_cfg_get_route(&pkg->header);
392
393         if (tb_cfg_error(tb->ctl, route, pkg->port,
394                          TB_CFG_ERROR_ACK_PLUG_EVENT)) {
395                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
396                         pkg->port);
397         }
398
399         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
400         if (!ev)
401                 return;
402         INIT_WORK(&ev->work, tb_handle_hotplug);
403         ev->tb = tb;
404         ev->route = route;
405         ev->port = pkg->port;
406         ev->unplug = pkg->unplug;
407         queue_work(tb->wq, &ev->work);
408 }
409
410 static void tb_stop(struct tb *tb)
411 {
412         struct tb_cm *tcm = tb_priv(tb);
413         struct tb_tunnel *tunnel;
414         struct tb_tunnel *n;
415
416         /* tunnels are only present after everything has been initialized */
417         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
418                 tb_tunnel_deactivate(tunnel);
419                 tb_tunnel_free(tunnel);
420         }
421         tb_switch_remove(tb->root_switch);
422         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
423 }
424
425 static int tb_scan_finalize_switch(struct device *dev, void *data)
426 {
427         if (tb_is_switch(dev)) {
428                 struct tb_switch *sw = tb_to_switch(dev);
429
430                 /*
431                  * If we found that the switch was already setup by the
432                  * boot firmware, mark it as authorized now before we
433                  * send uevent to userspace.
434                  */
435                 if (sw->boot)
436                         sw->authorized = 1;
437
438                 dev_set_uevent_suppress(dev, false);
439                 kobject_uevent(&dev->kobj, KOBJ_ADD);
440                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
441         }
442
443         return 0;
444 }
445
446 static int tb_start(struct tb *tb)
447 {
448         struct tb_cm *tcm = tb_priv(tb);
449         int ret;
450
451         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
452         if (!tb->root_switch)
453                 return -ENOMEM;
454
455         /*
456          * ICM firmware upgrade needs running firmware and in native
457          * mode that is not available so disable firmware upgrade of the
458          * root switch.
459          */
460         tb->root_switch->no_nvm_upgrade = true;
461
462         ret = tb_switch_configure(tb->root_switch);
463         if (ret) {
464                 tb_switch_put(tb->root_switch);
465                 return ret;
466         }
467
468         /* Announce the switch to the world */
469         ret = tb_switch_add(tb->root_switch);
470         if (ret) {
471                 tb_switch_put(tb->root_switch);
472                 return ret;
473         }
474
475         /* Full scan to discover devices added before the driver was loaded. */
476         tb_scan_switch(tb->root_switch);
477         /* Find out tunnels created by the boot firmware */
478         tb_discover_tunnels(tb->root_switch);
479         /* Make the discovered switches available to the userspace */
480         device_for_each_child(&tb->root_switch->dev, NULL,
481                               tb_scan_finalize_switch);
482
483         /* Allow tb_handle_hotplug to progress events */
484         tcm->hotplug_active = true;
485         return 0;
486 }
487
/*
 * tb_suspend_noirq() - suspend the switch tree and stop processing
 * further hotplug events so the workqueue can drain.  Always returns 0.
 */
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
499
500 static int tb_resume_noirq(struct tb *tb)
501 {
502         struct tb_cm *tcm = tb_priv(tb);
503         struct tb_tunnel *tunnel, *n;
504
505         tb_dbg(tb, "resuming...\n");
506
507         /* remove any pci devices the firmware might have setup */
508         tb_switch_reset(tb, 0);
509
510         tb_switch_resume(tb->root_switch);
511         tb_free_invalid_tunnels(tb);
512         tb_free_unplugged_children(tb->root_switch);
513         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
514                 tb_tunnel_restart(tunnel);
515         if (!list_empty(&tcm->tunnel_list)) {
516                 /*
517                  * the pcie links need some time to get going.
518                  * 100ms works for me...
519                  */
520                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
521                 msleep(100);
522         }
523          /* Allow tb_handle_hotplug to progress events */
524         tcm->hotplug_active = true;
525         tb_dbg(tb, "resume finished\n");
526
527         return 0;
528 }
529
/*
 * Operations of the native (non-ICM) connection manager; installed on
 * the domain in tb_probe().  PCIe tunneling doubles as the switch
 * approval hook.
 */
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.handle_event = tb_handle_event,
	.approve_switch = tb_tunnel_pci,
};
538
539 struct tb *tb_probe(struct tb_nhi *nhi)
540 {
541         struct tb_cm *tcm;
542         struct tb *tb;
543
544         if (!x86_apple_machine)
545                 return NULL;
546
547         tb = tb_domain_alloc(nhi, sizeof(*tcm));
548         if (!tb)
549                 return NULL;
550
551         tb->security_level = TB_SECURITY_USER;
552         tb->cm_ops = &tb_cm_ops;
553
554         tcm = tb_priv(tb);
555         INIT_LIST_HEAD(&tcm->tunnel_list);
556
557         return tb;
558 }