// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}
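
/*
 * Illustrative sketch (not part of the driver): how the cache above is
 * consumed. nvm_authenticate_show() further down does effectively this
 * when userspace reads the nvm_authenticate attribute; a status of 0
 * means no recorded authentication failure.
 */
static inline u32 example_cached_auth_status(const struct tb_switch *sw)
{
	u32 status;

	nvm_get_auth_status(sw, &status);
	return status;
}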

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this
		 * if everything goes well so getting a timeout is
		 * expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from the update auth operation requires
		 * power cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled for
	 * the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow the root
	 * port to go into D3cold because some root ports cannot
	 * trigger PME themselves. To be on the safe side keep the root
	 * port in D0 during the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	}
	if (auth_only)
		return -EOPNOTSUPP;

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 on success and negative errno in case
 * of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}
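
/*
 * Illustrative sketch (not part of the driver): read the first dword of
 * the NVM. Treating offset 0 as the start of the image is an assumption
 * made for the example; as noted above, locking is up to the caller.
 */
static inline int example_nvm_read_first_dword(struct tb_switch *sw, u32 *val)
{
	return tb_switch_nvm_read(sw, 0, val, sizeof(*val));
}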

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
		tb_sw_dbg(sw, "NVM version %x.%x\n", nvm->major, nvm->minor);
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		switch (state) {
		case TB_PORT_DISABLED:
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;

		case TB_PORT_UNPLUGGED:
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				break;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;

		case TB_PORT_UP:
		case TB_PORT_TX_CL0S:
		case TB_PORT_RX_CL0S:
		case TB_PORT_CL1:
		case TB_PORT_CL2:
			tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
			return 1;

		default:
			if (state < 0)
				return state;

			/*
			 * After plug-in the state is TB_PORT_CONNECTING.
			 * Give it some time.
			 */
			tb_port_dbg(port,
				    "is connected, link is not up (state: %d), retrying...\n",
				    state);
			msleep(100);
		}
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}
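
/*
 * Illustrative sketch (not part of the driver): the typical resume-time
 * pattern for a downstream port, waiting through the unplugged state as
 * the kernel-doc above describes. Only a return value of 1 means the
 * link is actually up.
 */
static inline bool example_port_is_up_after_resume(struct tb_port *port)
{
	return tb_wait_for_port(port, true) == 1;
}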

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used to enable lane 0 and 1 adapters.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used to disable lane 0 and 1 adapters.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

static int tb_port_reset(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		return port->cap_usb4 ? usb4_port_reset(port) : 0;
	return tb_lc_reset_port(port);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
		 */
		if (port->cap_usb4) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_free(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_free(&port->out_hopids, hopid);
}

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}
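
/*
 * Worked example for the mask above (illustrative): the route string
 * encodes one 8-bit port number per hop, so a parent at depth 2 gives
 * mask = (1ULL << 16) - 1 = 0xffff. Two routers are then "reachable"
 * when the first two hops of their route strings match, i.e. @parent
 * is an ancestor of @sw.
 */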

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}
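
/*
 * Illustrative sketch (not part of the driver): walking every port on a
 * path with the iterator above. The tb_for_each_port_on_path() helper
 * in tb.h wraps this same pattern.
 */
static inline void example_walk_path(struct tb_port *src, struct tb_port *dst)
{
	struct tb_port *p = NULL;

	while ((p = tb_next_port_on_path(src, dst, p)))
		tb_port_dbg(p, "on path\n");
}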

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;

	switch (speed) {
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
		return 40;
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
		return 20;
	default:
		return 10;
	}
}

/**
 * tb_port_get_link_generation() - Returns link generation
 * @port: Lane adapter
 *
 * Returns link generation as number or negative errno in case of
 * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
 * links so for those always returns 2.
 */
int tb_port_get_link_generation(struct tb_port *port)
{
	int ret;

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	switch (ret) {
	case 40:
		return 4;
	case 20:
		return 3;
	default:
		return 2;
	}
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns the link width as encoded in &enum tb_link_width or negative
 * errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Matches the values in enum tb_link_width */
	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

/**
 * tb_port_width_supported() - Is the given link width supported
 * @port: Port to check
 * @width: Widths to check (bitmask)
 *
 * Can be called for any lane adapter. Checks if given @width is
 * supported by the hardware and returns %true if it is.
 */
bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
		if (tb_port_get_link_generation(port) < 4 ||
		    !usb4_port_asym_supported(port))
			return false;
	}

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	/*
	 * The field encoding is the same as &enum tb_link_width (which is
	 * passed to @width).
	 */
	widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
	return widths & width;
}

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		/* Gen 4 link cannot be single */
		if (tb_port_get_link_generation(port) >= 4)
			return -EOPNOTSUPP;
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_DUAL:
		if (tb_port_get_link_generation(port) >= 4)
			return usb4_port_asym_set_link_width(port, width);
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_ASYM_TX:
	case TB_LINK_WIDTH_ASYM_RX:
		return usb4_port_asym_set_link_width(port, width);

	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}
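
/*
 * Illustrative sketch (not part of the driver): callers can combine the
 * two helpers above, verifying that a width is supported before
 * requesting it.
 */
static inline int example_request_width(struct tb_port *port,
					enum tb_link_width width)
{
	if (!tb_port_width_supported(port, width))
		return -EOPNOTSUPP;
	return tb_port_set_link_width(port, width);
}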

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (bonding)
		val |= LANE_ADP_CS_1_LB;
	else
		val &= ~LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	enum tb_link_width width;
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by
	 * for example the boot firmware.
	 */
	width = tb_port_get_link_width(port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane0;
	}

	width = tb_port_get_link_width(port->dual_link_port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port->dual_link_port,
					     TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane1;
	}

	/*
	 * Only set bonding if the link was not already bonded. This
	 * avoids the lane adapter re-entering the bonding state.
	 */
	if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
		ret = tb_port_set_lane_bonding(port, true);
		if (ret)
			goto err_lane1;
	}

	/*
	 * When lane 0 bonding is set it will affect lane 1 too so
	 * update both.
	 */
	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);

	return ret;
}

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
	port->dual_link_port->bonded = false;
	port->bonded = false;
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (bitmask)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the width was not reached
 * within the given timeout, %0 if it did. Can be passed a mask of
 * expected widths and succeeds if any of the widths is reached.
 */
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	/* Gen 4 link does not support single lane */
	if ((width & TB_LINK_WIDTH_SINGLE) &&
	    tb_port_get_link_generation(port) >= 4)
		return -EOPNOTSUPP;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0) {
			/*
			 * Sometimes we get port locked error when
			 * polling the lanes so we can ignore it and
			 * retry.
			 */
			if (ret != -EACCES)
				return ret;
		} else if (ret & width) {
			return 0;
		}

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;
	return tb_port_do_update_credits(port->dual_link_port);
}
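
/*
 * Illustrative sketch (not part of the driver) of the bonding sequence
 * the kernel-doc above describes: set the target width, wait for the
 * link to actually reach it, then re-read the credits. The real caller
 * adds locking and error handling; the 100 ms timeout here is an
 * assumption made for the example.
 */
static inline int example_bond_link(struct tb_port *down)
{
	int ret;

	ret = tb_port_lane_bonding_enable(down);
	if (ret)
		return ret;

	ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_DUAL, 100);
	if (ret) {
		tb_port_lane_bonding_disable(down);
		return ret;
	}

	return tb_port_update_credits(down);
}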

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has the HPD bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HPD);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HPD set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HPDC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
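
/*
 * Illustrative sketch (not part of the driver) of the ordering the
 * kernel-doc above implies: program the HopIDs first, then enable the
 * video/AUX paths. The HopID values are hypothetical placeholders, not
 * real allocations.
 */
static inline int example_dp_activate(struct tb_port *port)
{
	int ret;

	/* 8/9/10 are made-up HopIDs for the example */
	ret = tb_dp_port_set_hops(port, 8, 9, 10);
	if (ret)
		return ret;

	return tb_dp_port_enable(port, true);
}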

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       "  Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, "  unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

static int tb_switch_reset_host(struct tb_switch *sw)
{
	if (sw->generation > 1) {
		struct tb_port *port;

		tb_switch_for_each_port(sw, port) {
			int i, ret;

			/*
			 * For lane adapters we issue downstream port
			 * reset and clear up path config spaces.
			 *
			 * For protocol adapters we disable the path and
			 * clear path config space one by one (from 8 to
			 * Max Input HopID of the adapter).
			 */
			if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
				ret = tb_port_reset(port);
				if (ret)
					return ret;
			} else if (tb_port_is_usb3_down(port) ||
				   tb_port_is_usb3_up(port)) {
				tb_usb3_port_enable(port, false);
			} else if (tb_port_is_dpin(port) ||
				   tb_port_is_dpout(port)) {
				tb_dp_port_enable(port, false);
			} else if (tb_port_is_pcie_down(port) ||
				   tb_port_is_pcie_up(port)) {
				tb_pci_port_enable(port, false);
			} else {
				continue;
			}

			/* Cleanup path config space of protocol adapter */
			for (i = TB_PATH_MIN_HOPID;
			     i <= port->config.max_in_hop_id; i++) {
				ret = tb_path_deactivate_hop(port, i);
				if (ret)
					return ret;
			}
		}
	} else {
		struct tb_cfg_result res;

		/* Thunderbolt 1 uses the "reset" config space packet */
		res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
				      TB_CFG_SWITCH, 2, 2);
		if (res.err)
			return res.err;
		res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
		if (res.err > 0)
			return -EIO;
		else if (res.err < 0)
			return res.err;
	}

	return 0;
}

static int tb_switch_reset_device(struct tb_switch *sw)
{
	return tb_port_reset(tb_switch_downstream_port(sw));
}

static bool tb_switch_enumerated(struct tb_switch *sw)
{
	u32 val;
	int ret;

	/*
	 * Read directly from the hardware because we use this also
	 * during system sleep where sw->config.enabled is already set
	 * by us.
	 */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
	if (ret)
		return false;

	return !!(val & ROUTER_CS_3_V);
}

/**
 * tb_switch_reset() - Perform reset to the router
 * @sw: Router to reset
 *
 * Issues reset to the router @sw. Can be used for any router. For host
 * routers, resets all the downstream ports and cleans up path config
 * spaces accordingly. For device routers issues downstream port reset
 * through the parent router, so as a side effect there will be an
 * unplug soon after this is finished.
 *
 * If the router is not enumerated does nothing.
 *
 * Returns %0 on success or negative errno in case of failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	int ret;

	/*
	 * We cannot access the port config spaces unless the router is
	 * already enumerated. If the router is not enumerated it is
	 * equal to being reset so we can skip that here.
	 */
	if (!tb_switch_enumerated(sw))
		return 0;

	tb_sw_dbg(sw, "resetting\n");

	if (tb_route(sw))
		ret = tb_switch_reset_device(sw);
	else
		ret = tb_switch_reset_host(sw);

	if (ret)
		tb_sw_warn(sw, "failed to reset\n");

	return ret;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait till the specified bits in specified offset reach specified value.
 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
 * within the given timeout or a negative errno in case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
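
/*
 * Illustrative sketch (not part of the driver): wait up to 500 ms for
 * the router to report itself valid, polling the same register and bit
 * that tb_switch_enumerated() above reads. The timeout value is an
 * assumption made for the example.
 */
static inline int example_wait_router_valid(struct tb_switch *sw)
{
	return tb_switch_wait_for_bit(sw, ROUTER_CS_3, ROUTER_CS_3_V,
				      ROUTER_CS_3_V, 500);
}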

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			/*
			 * Skip Alpine Ridge, it needs to have vendor
			 * specific USB hotplug event enabled for the
			 * internal xHCI to work.
			 */
			if (!tb_switch_is_alpine_ridge(sw))
				data |= TB_PLUG_EVENTS_USB_DISABLE;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

	return 0;
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	char envp_string[13];
	int ret = -EINVAL;
	char *envp[] = { envp_string, NULL };

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/*
		 * Notify status change to the userspace, informing the new
		 * value of /sys/bus/thunderbolt/devices/.../authorized.
		 */
		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);
f67cf491
MW
1889static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1890 char *buf)
1891{
1892 struct tb_switch *sw = tb_to_switch(dev);
1893 ssize_t ret;
1894
09f11b6c
MW
1895 if (!mutex_trylock(&sw->tb->lock))
1896 return restart_syscall();
f67cf491
MW
1897
1898 if (sw->key)
8283fb57 1899 ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
f67cf491 1900 else
8283fb57 1901 ret = sysfs_emit(buf, "\n");
f67cf491 1902
09f11b6c 1903 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1904 return ret;
1905}
1906
1907static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1908 const char *buf, size_t count)
1909{
1910 struct tb_switch *sw = tb_to_switch(dev);
1911 u8 key[TB_SWITCH_KEY_SIZE];
1912 ssize_t ret = count;
e545f0d8 1913 bool clear = false;
f67cf491 1914
e545f0d8
BY
1915 if (!strcmp(buf, "\n"))
1916 clear = true;
1917 else if (hex2bin(key, buf, sizeof(key)))
f67cf491
MW
1918 return -EINVAL;
1919
09f11b6c
MW
1920 if (!mutex_trylock(&sw->tb->lock))
1921 return restart_syscall();
f67cf491
MW
1922
1923 if (sw->authorized) {
1924 ret = -EBUSY;
1925 } else {
1926 kfree(sw->key);
e545f0d8
BY
1927 if (clear) {
1928 sw->key = NULL;
1929 } else {
1930 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1931 if (!sw->key)
1932 ret = -ENOMEM;
1933 }
f67cf491
MW
1934 }
1935
09f11b6c 1936 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1937 return ret;
1938}
0956e411 1939static DEVICE_ATTR(key, 0600, key_show, key_store);
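
/*
 * Illustrative sketch, not part of the driver: the key is
 * TB_SWITCH_KEY_SIZE (32) bytes and key_store() parses it with
 * hex2bin(), so userspace provisions it as 64 hex characters. Roughly
 * (device path is a made-up example):
 *
 *	unsigned char key[32];
 *	char hex[65];
 *	int i;
 *
 *	getrandom(key, sizeof(key), 0);
 *	for (i = 0; i < 32; i++)
 *		sprintf(&hex[i * 2], "%02x", key[i]);
 *	then write hex to /sys/bus/thunderbolt/devices/0-1/key
 */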

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int width;

	switch (sw->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_TX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_RX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);

static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int width;

	switch (sw->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_RX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_TX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
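
/*
 * Worked example of the values reported above (derived from the two
 * switch statements): a symmetric dual-lane link reads back as
 * rx_lanes == tx_lanes == 2, while a link aggregated as
 * TB_LINK_WIDTH_ASYM_TX reads back as tx_lanes == 3 and rx_lanes == 1.
 */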

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sysfs_emit(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val, ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (sw->no_nvm_upgrade) {
		ret = -EOPNOTSUPP;
		goto exit_unlock;
	}

	/* NVMem devices have not been added yet */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		if (val == AUTHENTICATE_ONLY) {
			if (disconnect)
				ret = -EINVAL;
			else
				ret = nvm_authenticate(sw, true);
		} else {
			if (!sw->nvm->flushed) {
				if (!sw->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = nvm_validate_and_write(sw);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE) {
				if (disconnect)
					ret = tb_lc_force_power(sw);
				else
					ret = nvm_authenticate(sw, false);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);
	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
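
/*
 * Illustrative sketch, not part of the driver: a typical NVM upgrade
 * from userspace, matching nvm_authenticate_sysfs() above (device
 * paths are made-up examples):
 *
 *	1. Write the new image to the non-active NVMem device under
 *	   /sys/bus/thunderbolt/devices/0-1/.
 *	2. Write WRITE_AND_AUTHENTICATE to
 *	   /sys/bus/thunderbolt/devices/0-1/nvm_authenticate.
 *	3. On failure the attribute reads back a non-zero status
 *	   (see nvm_authenticate_show()).
 */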

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_authorized.attr) {
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY)
			return 0;
	} else if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static const struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		ida_destroy(&port->in_hopids);
		ida_destroy(&port->out_hopids);
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct tb_switch *sw = tb_to_switch(dev);
	const char *type;

	if (tb_switch_is_usb4(sw)) {
		if (add_uevent_var(env, "USB4_VERSION=%u.0",
				   usb4_switch_version(sw)))
			return -ENOMEM;
	}

	if (!tb_route(sw)) {
		type = "host";
	} else {
		const struct tb_port *port;
		bool hub = false;

		/* Device is hub if it has any downstream ports */
		tb_switch_for_each_port(sw, port) {
			if (!port->disabled && !tb_is_upstream_port(port) &&
			    tb_port_is_null(port)) {
				hub = true;
				break;
			}
		}

		type = hub ? "hub" : "device";
	}

	if (add_uevent_var(env, "USB4_TYPE=%s", type))
		return -ENOMEM;
	return 0;
}
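
/*
 * For example, a USB4 version 1 hub enumerated by this driver gets
 * uevent variables like the following (per the code above), which
 * udev rules can match on:
 *
 *	USB4_VERSION=1.0
 *	USB4_TYPE=hub
 */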

/*
 * Currently we only need to provide the callbacks. Everything else is
 * handled in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

const struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.uevent = tb_switch_uevent,
	.pm = &tb_switch_pm_ops,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw))
		return 4;

	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
		case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
			return 1;

		case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
			return 2;

		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ICL_NHI0:
		case PCI_DEVICE_ID_INTEL_ICL_NHI1:
			return 3;
		}
	}

	/*
	 * For unknown switches assume generation to be 1 to be on the
	 * safe side.
	 */
	tb_sw_warn(sw, "unsupported switch device id %#x\n",
		   sw->config.device_id);
	return 1;
}

static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
{
	int max_depth;

	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
		max_depth = USB4_SWITCH_MAX_DEPTH;
	else
		max_depth = TB_SWITCH_MAX_DEPTH;

	return depth > max_depth;
}
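
/*
 * Worked example of the route string used below (assuming the usual
 * one-byte-per-hop encoding behind tb_route_length()): route 0x301
 * means "leave the host router through port 1, then the next router
 * through port 3", i.e. two hops, so depth == 2. Route 0 is the host
 * router itself.
 */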

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;

		/* Control port does not need HopID allocation */
		if (i) {
			ida_init(&sw->ports[i].in_hopids);
			ida_init(&sw->ports[i].out_hopids);
		}
	}

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
	if (ret > 0)
		sw->cap_vsec_tmu = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
	if (ret > 0)
		sw->cap_lp = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}
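
/*
 * Illustrative sketch, not part of the driver: the life cycle a
 * connection manager follows with these helpers (see also the
 * kernel-doc of tb_switch_alloc() and tb_switch_add()):
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	if (tb_switch_configure(sw) || tb_switch_add(sw)) {
 *		tb_switch_put(sw);
 *		return -ENODEV;		// made-up error handling
 *	}
 *	...
 *	tb_switch_remove(sw);		// on unplug
 */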

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. Such a switch lacks essentially
 * all capabilities except the DMA configuration port, until it is
 * flashed with valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. Can be called for the switch again after
 * resume from low power states to re-initialize it.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities. Program it according to the
		 * USB4 version to avoid changing existing (v1) routers'
		 * behaviour.
		 */
		if (usb4_switch_version(sw) < 2)
			sw->config.cmuv = ROUTER_CS_4_CMUV_V1;
		else
			sw->config.cmuv = ROUTER_CS_4_CMUV_V2;
		sw->config.plug_events_delay = 0xa;

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
		if (ret)
			return ret;

		ret = usb4_switch_setup(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
	}
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

/**
 * tb_switch_configuration_valid() - Set the tunneling configuration to be valid
 * @sw: Router to configure
 *
 * Needs to be called before any tunnels can be set up through the
 * router. Can be called for any router.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_configuration_valid(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_configuration_valid(sw);
	return 0;
}

static int tb_switch_set_uuid(struct tb_switch *sw)
{
	bool uid = false;
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		if (ret)
			return ret;
		uid = true;
	} else {
		/*
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);
		if (ret) {
			if (ret != -EINVAL)
				return ret;
			uid = true;
		}
	}

	if (uid) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
	case 4:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
		if (ret)
			return ret;

		if (status) {
			tb_sw_info(sw, "switch flash authentication failed\n");
			nvm_set_auth_status(sw, status);
		}

		return 0;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when dma_port_flash_update_auth() returned. Power cycling is
	 * not needed (it was done already) so the only thing we do here
	 * is to unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here, which causes adding the switch to
	 * fail. It should appear back once the power cycle is complete.
	 */
	return -ESHUTDOWN;
}

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/* Must be called after tb_switch_update_link_attributes() */
static void tb_switch_link_init(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	bool bonded;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
	tb_sw_dbg(sw, "current link width %s\n", tb_width_name(sw->link_width));

	bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;

	/*
	 * Gen 4 links come up as bonded so update the port structures
	 * accordingly.
	 */
	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	up->bonded = bonded;
	if (up->dual_link_port)
		up->dual_link_port->bonded = bonded;
	tb_port_update_credits(up);

	down->bonded = bonded;
	if (down->dual_link_port)
		down->dual_link_port->bonded = bonded;
	tb_port_update_credits(down);

	if (tb_port_get_link_generation(up) < 4)
		return;

	/*
	 * Set the Gen 4 preferred link width. This is what the router
	 * prefers when the link is brought up. If the router does not
	 * support asymmetric link configuration, this also will be set
	 * to TB_LINK_WIDTH_DUAL.
	 */
	sw->preferred_link_width = sw->link_width;
	tb_sw_dbg(sw, "preferred link width %s\n",
		  tb_width_name(sw->preferred_link_width));
}
/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * The connection manager can call this function to enable lane bonding
 * of a switch. If conditions are correct and both switches support the
 * feature, lanes are bonded. It is safe to call this for any switch.
 */
static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	unsigned int width;
	int ret;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
	    !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
		return 0;

	/*
	 * Both lanes need to be in CL0. Here we assume lane 0 is already
	 * in CL0 and check just for lane 1.
	 */
	if (tb_wait_for_port(down->dual_link_port, false) <= 0)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	/* Any of the widths are all bonded */
	width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
		TB_LINK_WIDTH_ASYM_RX;

	return tb_port_wait_for_link_width(down, width, 100);
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 */
static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return 0;

	/*
	 * If the link is Gen 4 there is no way to switch the link to
	 * two single lane links so avoid that here. Also don't bother
	 * if the link is not up anymore (sw is unplugged).
	 */
	ret = tb_port_get_link_generation(up);
	if (ret < 0)
		return ret;
	if (ret >= 4)
		return -EOPNOTSUPP;

	down = tb_switch_downstream_port(sw);
	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	/*
	 * It is fine if we get other errors as the router might have
	 * been unplugged.
	 */
	return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
}

/* Note: updating sw->link_width is done in tb_switch_update_link_attributes() */
static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
{
	struct tb_port *up, *down, *port;
	enum tb_link_width down_width;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	if (width == TB_LINK_WIDTH_ASYM_TX) {
		down_width = TB_LINK_WIDTH_ASYM_RX;
		port = down;
	} else {
		down_width = TB_LINK_WIDTH_ASYM_TX;
		port = up;
	}

	ret = tb_port_set_link_width(up, width);
	if (ret)
		return ret;

	ret = tb_port_set_link_width(down, down_width);
	if (ret)
		return ret;

	/*
	 * Initiate the change in the router that one of its TX lanes is
	 * changing to RX but do so only if there is an actual change.
	 */
	if (sw->link_width != width) {
		ret = usb4_port_asym_start(port);
		if (ret)
			return ret;

		ret = tb_port_wait_for_link_width(up, width, 100);
		if (ret)
			return ret;
	}

	return 0;
}

/* Note: updating sw->link_width is done in tb_switch_update_link_attributes() */
static int tb_switch_asym_disable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
	if (ret)
		return ret;

	ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
	if (ret)
		return ret;

	/*
	 * Initiate the change in the router that has three TX lanes and
	 * is changing one of its TX lanes to RX but only if there is a
	 * change in the link width.
	 */
	if (sw->link_width > TB_LINK_WIDTH_DUAL) {
		if (sw->link_width == TB_LINK_WIDTH_ASYM_TX)
			ret = usb4_port_asym_start(up);
		else
			ret = usb4_port_asym_start(down);
		if (ret)
			return ret;

		ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_switch_set_link_width() - Configure router link width
 * @sw: Router to configure
 * @width: The new link width
 *
 * Set device router link width to @width from the router upstream port
 * perspective. Also supports asymmetric links if the routers on both
 * sides of the link support it.
 *
 * Does nothing for host router.
 *
 * Returns %0 in case of success, negative errno otherwise.
 */
int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
{
	struct tb_port *up, *down;
	int ret = 0;

	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		ret = tb_switch_lane_bonding_disable(sw);
		break;

	case TB_LINK_WIDTH_DUAL:
		if (sw->link_width == TB_LINK_WIDTH_ASYM_TX ||
		    sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
			ret = tb_switch_asym_disable(sw);
			if (ret)
				break;
		}
		ret = tb_switch_lane_bonding_enable(sw);
		break;

	case TB_LINK_WIDTH_ASYM_TX:
	case TB_LINK_WIDTH_ASYM_RX:
		ret = tb_switch_asym_enable(sw, width);
		break;
	}

	switch (ret) {
	case 0:
		break;

	case -ETIMEDOUT:
		tb_sw_warn(sw, "timeout changing link width\n");
		return ret;

	case -ENOTCONN:
	case -EOPNOTSUPP:
	case -ENODEV:
		return ret;

	default:
		tb_sw_dbg(sw, "failed to change link width: %d\n", ret);
		return ret;
	}

	tb_port_update_credits(down);
	tb_port_update_credits(up);

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "link width set to %s\n", tb_width_name(width));
	return ret;
}
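
/*
 * Illustrative sketch, not part of the driver: a connection manager
 * that wants more bandwidth towards a device (for example for
 * DisplayPort) could request an asymmetric link and fall back to a
 * symmetric one:
 *
 *	if (tb_switch_set_link_width(sw, TB_LINK_WIDTH_ASYM_TX))
 *		tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
 */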

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so that @sw will be disconnected when the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (sw->is_unplugged)
		return;
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
}

static void tb_switch_credits_init(struct tb_switch *sw)
{
	if (tb_switch_is_icm(sw))
		return;
	if (!tb_switch_is_usb4(sw))
		return;
	if (usb4_switch_credits_init(sw))
		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}
static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_switch_is_icm(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		int res;

		if (!port->cap_usb4)
			continue;

		res = usb4_port_hotplug_enable(port);
		if (res)
			return res;
	}
	return 0;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		tb_switch_credits_init(sw);

		/* read drom */
		ret = tb_drom_read(sw);
		if (ret)
			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_check_quirks(sw);

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		tb_switch_link_init(sw);

		ret = tb_switch_clx_init(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_port_hotplug_enable(sw);
	if (ret)
		return ret;

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = usb4_switch_add_ports(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add USB4 ports\n");
		goto err_del;
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		goto err_ports;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;

err_ports:
	usb4_switch_remove_ports(sw);
err_del:
	device_del(&sw->dev);

	return ret;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	usb4_switch_remove_ports(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		/* We don't have any way to confirm this was the same device */
		if (!sw->uid)
			return -ENODEV;

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;

		if (!tb_port_resume(port))
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends router and all its children. Enables wakes according to
 * value of @runtime and then sets sleep bit for the router. If @sw is
 * host router the domain is ready to go to sleep once this function
 * returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	/*
	 * Actually only needed for Titan Ridge but for simplicity can be
	 * done for USB4 device too as CLx is re-enabled at resume.
	 */
	tb_switch_clx_disable(sw);

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4;
		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if the resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_alloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_alloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);

	return ret;
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
}
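
/*
 * Illustrative sketch, not part of the driver: a connection manager
 * setting up a DP tunnel would pair these helpers roughly like this:
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return -EBUSY;			// made-up error handling
 *
 *	if (tb_switch_alloc_dp_resource(sw, in))
 *		return -EBUSY;
 *	...
 *	tb_switch_dealloc_dp_resource(sw, in);	// when tearing down
 */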

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
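
/*
 * Illustrative sketch, not part of the driver: callers must drop the
 * reference these lookups take, per the kernel-doc above:
 *
 *	struct tb_switch *sw = tb_switch_find_by_uuid(tb, uuid);
 *
 *	if (sw) {
 *		...use sw...
 *		tb_switch_put(sw);
 *	}
 */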

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
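
/*
 * For example, a connection manager looking for a DP IN adapter to
 * start a tunnel from could do (sketch, assuming the usual
 * TB_TYPE_DP_HDMI_IN port type):
 *
 *	struct tb_port *in = tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN);
 *
 *	if (!in)
 *		return -ENODEV;
 */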

/*
 * Can be used to read/write a specified PCIe bridge for any Thunderbolt 3
 * device. For now used only for Titan Ridge.
 */
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
				       unsigned int pcie_offset, u32 value)
{
	u32 offset, command, val;
	int ret;

	if (sw->generation != 3)
		return -EOPNOTSUPP;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
			<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;

	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	ret = tb_switch_wait_for_bit(sw, offset,
				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
		return -ETIMEDOUT;

	return 0;
}

/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1
 *
 * For the Titan Ridge switch to enter the CLx state, its PCIe bridges
 * shall enable entry to the PCIe L1 state. Shall be called after the
 * upstream PCIe tunnel has been configured. Due to Intel platform
 * limitations, shall be called only for the first-hop switch.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	/* Enable PCIe L1 enable only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	/* Write to downstream PCIe bridge #5 aka Dn4 */
	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
	if (ret)
		return ret;

	/* Write to Upstream PCIe bridge #0 aka Up0 */
	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}

/**
 * tb_switch_xhci_connect() - Connect internal xHCI
 * @sw: Router whose xHCI to connect
 *
 * Can be called for any router. For Alpine Ridge and Titan Ridge
 * performs special flows that bring the xHCI functional for any device
 * connected to the type-C port. Call only after the PCIe tunnel has
 * been established. The function only does the connect if not done
 * already, so it can be called several times for the same router.
 */
int tb_switch_xhci_connect(struct tb_switch *sw)
{
	struct tb_port *port1, *port3;
	int ret;

	if (sw->generation != 3)
		return 0;

	port1 = &sw->ports[1];
	port3 = &sw->ports[3];

	if (tb_switch_is_alpine_ridge(sw)) {
		bool usb_port1, usb_port3, xhci_port1, xhci_port3;

		usb_port1 = tb_lc_is_usb_plugged(port1);
		usb_port3 = tb_lc_is_usb_plugged(port3);
		xhci_port1 = tb_lc_is_xhci_connected(port1);
		xhci_port3 = tb_lc_is_xhci_connected(port3);

		/* Figure out correct USB port to connect */
		if (usb_port1 && !xhci_port1) {
			ret = tb_lc_xhci_connect(port1);
			if (ret)
				return ret;
		}
		if (usb_port3 && !xhci_port3)
			return tb_lc_xhci_connect(port3);
	} else if (tb_switch_is_titan_ridge(sw)) {
		ret = tb_lc_xhci_connect(port1);
		if (ret)
			return ret;
		return tb_lc_xhci_connect(port3);
	}

	return 0;
}

/**
 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
 * @sw: Router whose xHCI to disconnect
 *
 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
 * ports.
 */
void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
	if (sw->generation == 3) {
		struct tb_port *port1 = &sw->ports[1];
		struct tb_port *port3 = &sw->ports[3];

		tb_lc_xhci_disconnect(port1);
		tb_port_dbg(port1, "disconnected xHCI\n");
		tb_lc_xhci_disconnect(port3);
		tb_port_dbg(port3, "disconnected xHCI\n");
	}
}