thunderbolt: Do not resume routers if UID is not set
[linux-block.git] / drivers / thunderbolt / switch.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
a25c8b2f 2/*
15c6784c 3 * Thunderbolt driver - switch/port utility functions
a25c8b2f
AN
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
15c6784c 6 * Copyright (C) 2018, Intel Corporation
a25c8b2f
AN
7 */
8
9#include <linux/delay.h>
e6b245cc
MW
10#include <linux/idr.h>
11#include <linux/nvmem-provider.h>
2d8ff0b5 12#include <linux/pm_runtime.h>
09f11b6c 13#include <linux/sched/signal.h>
e6b245cc 14#include <linux/sizes.h>
10fefe56 15#include <linux/slab.h>
fa487b2a 16#include <linux/module.h>
a25c8b2f
AN
17
18#include "tb.h"
19
e6b245cc
MW
20/* Switch NVM support */
21
e6b245cc 22#define NVM_CSS 0x10
e6b245cc
MW
23
24struct nvm_auth_status {
25 struct list_head list;
7c39ffe7 26 uuid_t uuid;
e6b245cc
MW
27 u32 status;
28};
29
fa487b2a
GF
30static bool clx_enabled = true;
31module_param_named(clx, clx_enabled, bool, 0444);
32MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
33
e6b245cc
MW
34/*
35 * Hold NVM authentication failure status per switch. This information
36 * needs to stay around even when the switch gets power cycled so we
37 * keep it separately.
38 */
39static LIST_HEAD(nvm_auth_status_cache);
40static DEFINE_MUTEX(nvm_auth_status_lock);
41
42static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
43{
44 struct nvm_auth_status *st;
45
46 list_for_each_entry(st, &nvm_auth_status_cache, list) {
7c39ffe7 47 if (uuid_equal(&st->uuid, sw->uuid))
e6b245cc
MW
48 return st;
49 }
50
51 return NULL;
52}
53
54static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
55{
56 struct nvm_auth_status *st;
57
58 mutex_lock(&nvm_auth_status_lock);
59 st = __nvm_get_auth_status(sw);
60 mutex_unlock(&nvm_auth_status_lock);
61
62 *status = st ? st->status : 0;
63}
64
65static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
66{
67 struct nvm_auth_status *st;
68
69 if (WARN_ON(!sw->uuid))
70 return;
71
72 mutex_lock(&nvm_auth_status_lock);
73 st = __nvm_get_auth_status(sw);
74
75 if (!st) {
76 st = kzalloc(sizeof(*st), GFP_KERNEL);
77 if (!st)
78 goto unlock;
79
80 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
81 INIT_LIST_HEAD(&st->list);
82 list_add_tail(&st->list, &nvm_auth_status_cache);
83 }
84
85 st->status = status;
86unlock:
87 mutex_unlock(&nvm_auth_status_lock);
88}
89
90static void nvm_clear_auth_status(const struct tb_switch *sw)
91{
92 struct nvm_auth_status *st;
93
94 mutex_lock(&nvm_auth_status_lock);
95 st = __nvm_get_auth_status(sw);
96 if (st) {
97 list_del(&st->list);
98 kfree(st);
99 }
100 mutex_unlock(&nvm_auth_status_lock);
101}
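/*
 * Illustrative sketch (not part of the driver flow itself) of how the
 * three helpers above are used together; the real call sites are in
 * nvm_authenticate_*() and the sysfs handlers later in this file:
 *
 *	u32 status;
 *
 *	if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
 *		nvm_set_auth_status(sw, status);	/* remember failure */
 *	...
 *	nvm_get_auth_status(sw, &status);		/* reported via sysfs */
 *	...
 *	nvm_clear_auth_status(sw);	/* on new authentication or unplug */
 */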
102
103static int nvm_validate_and_write(struct tb_switch *sw)
104{
105 unsigned int image_size, hdr_size;
106 const u8 *buf = sw->nvm->buf;
107 u16 ds_size;
108 int ret;
109
110 if (!buf)
111 return -EINVAL;
112
113 image_size = sw->nvm->buf_data_size;
114 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
115 return -EINVAL;
116
117 /*
118 * The FARB pointer must point inside the image, and the image must at
119 * least contain the parts of the digital section we will be reading here.
120 */
121 hdr_size = (*(u32 *)buf) & 0xffffff;
122 if (hdr_size + NVM_DEVID + 2 >= image_size)
123 return -EINVAL;
124
125 /* Digital section start should be aligned to 4k page */
126 if (!IS_ALIGNED(hdr_size, SZ_4K))
127 return -EINVAL;
128
129 /*
130 * Read digital section size and check that it also fits inside
131 * the image.
132 */
133 ds_size = *(u16 *)(buf + hdr_size);
134 if (ds_size >= image_size)
135 return -EINVAL;
136
137 if (!sw->safe_mode) {
138 u16 device_id;
139
140 /*
141 * Make sure the device ID in the image matches the one
142 * we read from the switch config space.
143 */
144 device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
145 if (device_id != sw->config.device_id)
146 return -EINVAL;
147
148 if (sw->generation < 3) {
149 /* Write CSS headers first */
150 ret = dma_port_flash_write(sw->dma_port,
151 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
152 DMA_PORT_CSS_MAX_SIZE);
153 if (ret)
154 return ret;
155 }
156
157 /* Skip headers in the image */
158 buf += hdr_size;
159 image_size -= hdr_size;
160 }
161
b0407983 162 if (tb_switch_is_usb4(sw))
4b794f80
ML
163 ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
164 else
165 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
166 if (!ret)
167 sw->nvm->flushed = true;
168 return ret;
e6b245cc
MW
169}
170
b0407983 171static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
e6b245cc 172{
7a7ebfa8 173 int ret = 0;
e6b245cc
MW
174
175 /*
176 * Root switch NVM upgrade requires that we disconnect the
d1ff7024 177 * existing paths first (in case it is not in safe mode
e6b245cc
MW
178 * already).
179 */
180 if (!sw->safe_mode) {
7a7ebfa8
MW
181 u32 status;
182
d1ff7024 183 ret = tb_domain_disconnect_all_paths(sw->tb);
e6b245cc
MW
184 if (ret)
185 return ret;
186 /*
187 * The host controller goes away pretty soon after this if
188 * everything goes well so getting a timeout is expected.
189 */
190 ret = dma_port_flash_update_auth(sw->dma_port);
7a7ebfa8
MW
191 if (!ret || ret == -ETIMEDOUT)
192 return 0;
193
194 /*
195 * Any error from update auth operation requires power
196 * cycling of the host router.
197 */
198 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
199 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
200 nvm_set_auth_status(sw, status);
e6b245cc
MW
201 }
202
203 /*
204 * From safe mode we can get out by just power cycling the
205 * switch.
206 */
207 dma_port_power_cycle(sw->dma_port);
7a7ebfa8 208 return ret;
e6b245cc
MW
209}
210
b0407983 211static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
e6b245cc
MW
212{
213 int ret, retries = 10;
214
215 ret = dma_port_flash_update_auth(sw->dma_port);
7a7ebfa8
MW
216 switch (ret) {
217 case 0:
218 case -ETIMEDOUT:
219 case -EACCES:
220 case -EINVAL:
221 /* Power cycle is required */
222 break;
223 default:
e6b245cc 224 return ret;
7a7ebfa8 225 }
e6b245cc
MW
226
227 /*
228 * Poll here for the authentication status. It takes some time
229 * for the device to respond (we get a timeout for a while). Once
230 * we get a response the device needs to be power cycled in order
231 * for the new NVM to be taken into use.
232 */
233 do {
234 u32 status;
235
236 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
237 if (ret < 0 && ret != -ETIMEDOUT)
238 return ret;
239 if (ret > 0) {
240 if (status) {
241 tb_sw_warn(sw, "failed to authenticate NVM\n");
242 nvm_set_auth_status(sw, status);
243 }
244
245 tb_sw_info(sw, "power cycling the switch now\n");
246 dma_port_power_cycle(sw->dma_port);
247 return 0;
248 }
249
250 msleep(500);
251 } while (--retries);
252
253 return -ETIMEDOUT;
254}
255
b0407983
MW
256static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
257{
258 struct pci_dev *root_port;
259
260 /*
261 * During host router NVM upgrade we should not allow root port to
262 * go into D3cold because some root ports cannot trigger PME
263 * itself. To be on the safe side keep the root port in D0 during
264 * the whole upgrade process.
265 */
6ae72bfa 266 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
b0407983
MW
267 if (root_port)
268 pm_runtime_get_noresume(&root_port->dev);
269}
270
271static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
272{
273 struct pci_dev *root_port;
274
6ae72bfa 275 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
b0407983
MW
276 if (root_port)
277 pm_runtime_put(&root_port->dev);
278}
279
280static inline bool nvm_readable(struct tb_switch *sw)
281{
282 if (tb_switch_is_usb4(sw)) {
283 /*
284 * USB4 devices must support NVM operations but it is
285 * optional for hosts. Therefore we query the NVM sector
286 * size here and if it is supported assume NVM
287 * operations are implemented.
288 */
289 return usb4_switch_nvm_sector_size(sw) > 0;
290 }
291
292 /* Thunderbolt 2 and 3 devices support NVM through DMA port */
293 return !!sw->dma_port;
294}
295
296static inline bool nvm_upgradeable(struct tb_switch *sw)
297{
298 if (sw->no_nvm_upgrade)
299 return false;
300 return nvm_readable(sw);
301}
302
303static inline int nvm_read(struct tb_switch *sw, unsigned int address,
304 void *buf, size_t size)
305{
306 if (tb_switch_is_usb4(sw))
307 return usb4_switch_nvm_read(sw, address, buf, size);
308 return dma_port_flash_read(sw->dma_port, address, buf, size);
309}
310
1cbf680f 311static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
b0407983
MW
312{
313 int ret;
314
1cbf680f
MW
315 if (tb_switch_is_usb4(sw)) {
316 if (auth_only) {
317 ret = usb4_switch_nvm_set_offset(sw, 0);
318 if (ret)
319 return ret;
320 }
321 sw->nvm->authenticating = true;
b0407983 322 return usb4_switch_nvm_authenticate(sw);
1cbf680f
MW
323 } else if (auth_only) {
324 return -EOPNOTSUPP;
325 }
b0407983 326
1cbf680f 327 sw->nvm->authenticating = true;
b0407983
MW
328 if (!tb_route(sw)) {
329 nvm_authenticate_start_dma_port(sw);
330 ret = nvm_authenticate_host_dma_port(sw);
331 } else {
332 ret = nvm_authenticate_device_dma_port(sw);
333 }
334
335 return ret;
336}
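/*
 * Rough NVM upgrade flow as driven by the sysfs handlers below (a
 * sketch, error handling omitted):
 *
 *	1. Userspace writes the new image to the non-active NVMem device;
 *	   the data is cached via tb_switch_nvm_write()/tb_nvm_write_buf().
 *	2. Userspace writes 1 to the nvm_authenticate attribute.
 *	3. nvm_validate_and_write() checks and flushes the cached image.
 *	4. nvm_authenticate() asks the router to authenticate and take the
 *	   new image into use (power cycling it when needed).
 */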
337
e6b245cc
MW
338static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
339 size_t bytes)
340{
719a5fe8
MW
341 struct tb_nvm *nvm = priv;
342 struct tb_switch *sw = tb_to_switch(nvm->dev);
2d8ff0b5
MW
343 int ret;
344
345 pm_runtime_get_sync(&sw->dev);
4f7c2e0d
MW
346
347 if (!mutex_trylock(&sw->tb->lock)) {
348 ret = restart_syscall();
349 goto out;
350 }
351
b0407983 352 ret = nvm_read(sw, offset, val, bytes);
4f7c2e0d
MW
353 mutex_unlock(&sw->tb->lock);
354
355out:
2d8ff0b5
MW
356 pm_runtime_mark_last_busy(&sw->dev);
357 pm_runtime_put_autosuspend(&sw->dev);
e6b245cc 358
2d8ff0b5 359 return ret;
e6b245cc
MW
360}
361
362static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
363 size_t bytes)
364{
719a5fe8
MW
365 struct tb_nvm *nvm = priv;
366 struct tb_switch *sw = tb_to_switch(nvm->dev);
367 int ret;
e6b245cc 368
09f11b6c
MW
369 if (!mutex_trylock(&sw->tb->lock))
370 return restart_syscall();
e6b245cc
MW
371
372 /*
373 * Since writing the NVM image might require some special steps,
374 * for example when CSS headers are written, we cache the image
375 * locally here and handle the special cases when the user asks
376 * us to authenticate the image.
377 */
719a5fe8 378 ret = tb_nvm_write_buf(nvm, offset, val, bytes);
09f11b6c 379 mutex_unlock(&sw->tb->lock);
e6b245cc
MW
380
381 return ret;
382}
383
e6b245cc
MW
384static int tb_switch_nvm_add(struct tb_switch *sw)
385{
719a5fe8 386 struct tb_nvm *nvm;
e6b245cc
MW
387 u32 val;
388 int ret;
389
b0407983 390 if (!nvm_readable(sw))
e6b245cc
MW
391 return 0;
392
b0407983
MW
393 /*
394 * The NVM format of non-Intel hardware is not known so
395 * we currently restrict NVM upgrade to Intel hardware. We may
396 * relax this in the future when we learn other NVM formats.
397 */
83d17036
MW
398 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
399 sw->config.vendor_id != 0x8087) {
b0407983
MW
400 dev_info(&sw->dev,
401 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
402 sw->config.vendor_id);
403 return 0;
404 }
405
719a5fe8
MW
406 nvm = tb_nvm_alloc(&sw->dev);
407 if (IS_ERR(nvm))
408 return PTR_ERR(nvm);
e6b245cc
MW
409
410 /*
411 * If the switch is in safe-mode the only accessible portion of
412 * the NVM is the non-active one where userspace is expected to
413 * write new functional NVM.
414 */
415 if (!sw->safe_mode) {
416 u32 nvm_size, hdr_size;
417
b0407983 418 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
e6b245cc 419 if (ret)
719a5fe8 420 goto err_nvm;
e6b245cc
MW
421
422 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
423 nvm_size = (SZ_1M << (val & 7)) / 8;
424 nvm_size = (nvm_size - hdr_size) / 2;
425
b0407983 426 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
e6b245cc 427 if (ret)
719a5fe8 428 goto err_nvm;
e6b245cc
MW
429
430 nvm->major = val >> 16;
431 nvm->minor = val >> 8;
432
719a5fe8
MW
433 ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
434 if (ret)
435 goto err_nvm;
e6b245cc
MW
436 }
437
3f415e5e 438 if (!sw->no_nvm_upgrade) {
719a5fe8
MW
439 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
440 tb_switch_nvm_write);
441 if (ret)
442 goto err_nvm;
e6b245cc 443 }
e6b245cc 444
e6b245cc 445 sw->nvm = nvm;
e6b245cc
MW
446 return 0;
447
719a5fe8
MW
448err_nvm:
449 tb_nvm_free(nvm);
e6b245cc
MW
450 return ret;
451}
452
453static void tb_switch_nvm_remove(struct tb_switch *sw)
454{
719a5fe8 455 struct tb_nvm *nvm;
e6b245cc 456
e6b245cc
MW
457 nvm = sw->nvm;
458 sw->nvm = NULL;
e6b245cc
MW
459
460 if (!nvm)
461 return;
462
463 /* Remove authentication status in case the switch is unplugged */
464 if (!nvm->authenticating)
465 nvm_clear_auth_status(sw);
466
719a5fe8 467 tb_nvm_free(nvm);
e6b245cc
MW
468}
469
a25c8b2f
AN
470/* port utility functions */
471
1c561e4e 472static const char *tb_port_type(const struct tb_regs_port_header *port)
a25c8b2f
AN
473{
474 switch (port->type >> 16) {
475 case 0:
476 switch ((u8) port->type) {
477 case 0:
478 return "Inactive";
479 case 1:
480 return "Port";
481 case 2:
482 return "NHI";
483 default:
484 return "unknown";
485 }
486 case 0x2:
487 return "Ethernet";
488 case 0x8:
489 return "SATA";
490 case 0xe:
491 return "DP/HDMI";
492 case 0x10:
493 return "PCIe";
494 case 0x20:
495 return "USB";
496 default:
497 return "unknown";
498 }
499}
500
56ad3aef 501static void tb_dump_port(struct tb *tb, const struct tb_port *port)
a25c8b2f 502{
56ad3aef
MW
503 const struct tb_regs_port_header *regs = &port->config;
504
daa5140f
MW
505 tb_dbg(tb,
506 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
56ad3aef
MW
507 regs->port_number, regs->vendor_id, regs->device_id,
508 regs->revision, regs->thunderbolt_version, tb_port_type(regs),
509 regs->type);
daa5140f 510 tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
56ad3aef
MW
511 regs->max_in_hop_id, regs->max_out_hop_id);
512 tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
513 tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
514 tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
515 port->ctl_credits);
a25c8b2f
AN
516}
517
9da672a4
AN
518/**
519 * tb_port_state() - get connectedness state of a port
5cc0df9c 520 * @port: the port to check
9da672a4
AN
521 *
522 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
523 *
524 * Return: Returns an enum tb_port_state on success or an error code on failure.
525 */
5cc0df9c 526int tb_port_state(struct tb_port *port)
9da672a4
AN
527{
528 struct tb_cap_phy phy;
529 int res;
530 if (port->cap_phy == 0) {
531 tb_port_WARN(port, "does not have a PHY\n");
532 return -EINVAL;
533 }
534 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
535 if (res)
536 return res;
537 return phy.state;
538}
539
540/**
541 * tb_wait_for_port() - wait for a port to become ready
5c6b471b
MW
542 * @port: Port to wait for
543 * @wait_if_unplugged: Wait also when port is unplugged
9da672a4
AN
544 *
545 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
546 * wait_if_unplugged is set then we also wait if the port is in state
547 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
548 * switch resume). Otherwise we only wait if a device is registered but the link
549 * has not yet been established.
550 *
551 * Return: Returns an error code on failure. Returns 0 if the port is not
552 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
553 * if the port is connected and in state TB_PORT_UP.
554 */
555int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
556{
557 int retries = 10;
558 int state;
559 if (!port->cap_phy) {
560 tb_port_WARN(port, "does not have PHY\n");
561 return -EINVAL;
562 }
563 if (tb_is_upstream_port(port)) {
564 tb_port_WARN(port, "is the upstream port\n");
565 return -EINVAL;
566 }
567
568 while (retries--) {
569 state = tb_port_state(port);
570 if (state < 0)
571 return state;
572 if (state == TB_PORT_DISABLED) {
62efe699 573 tb_port_dbg(port, "is disabled (state: 0)\n");
9da672a4
AN
574 return 0;
575 }
576 if (state == TB_PORT_UNPLUGGED) {
577 if (wait_if_unplugged) {
578 /* used during resume */
62efe699
MW
579 tb_port_dbg(port,
580 "is unplugged (state: 7), retrying...\n");
9da672a4
AN
581 msleep(100);
582 continue;
583 }
62efe699 584 tb_port_dbg(port, "is unplugged (state: 7)\n");
9da672a4
AN
585 return 0;
586 }
587 if (state == TB_PORT_UP) {
62efe699 588 tb_port_dbg(port, "is connected, link is up (state: 2)\n");
9da672a4
AN
589 return 1;
590 }
591
592 /*
593 * After plug-in the state is TB_PORT_CONNECTING. Give it some
594 * time.
595 */
62efe699
MW
596 tb_port_dbg(port,
597 "is connected, link is not up (state: %d), retrying...\n",
598 state);
9da672a4
AN
599 msleep(100);
600 }
601 tb_port_warn(port,
602 "failed to reach state TB_PORT_UP. Ignoring port...\n");
603 return 0;
604}
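/*
 * Example (sketch): during resume the connection manager typically
 * polls each downstream port like this before looking for a remote:
 *
 *	ret = tb_wait_for_port(port, true);
 *	if (ret <= 0)
 *		continue;	/* error or nothing connected, skip port */
 */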
605
520b6702
AN
606/**
607 * tb_port_add_nfc_credits() - add/remove non-flow-controlled credits to port
5c6b471b
MW
608 * @port: Port to add/remove NFC credits
609 * @credits: Credits to add/remove
520b6702
AN
610 *
611 * Change the number of NFC credits allocated to @port by @credits. To remove
612 * NFC credits pass a negative amount of credits.
613 *
614 * Return: Returns 0 on success or an error code on failure.
615 */
616int tb_port_add_nfc_credits(struct tb_port *port, int credits)
617{
c5ee6feb
MW
618 u32 nfc_credits;
619
620 if (credits == 0 || port->sw->is_unplugged)
520b6702 621 return 0;
c5ee6feb 622
edfbd68b
MW
623 /*
624 * USB4 restricts programming NFC buffers to lane adapters only
625 * so skip other ports.
626 */
627 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
628 return 0;
629
8f57d478 630 nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
6cb27a04
MW
631 if (credits < 0)
632 credits = max_t(int, -nfc_credits, credits);
633
c5ee6feb
MW
634 nfc_credits += credits;
635
8f57d478
MW
636 tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
637 port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
c5ee6feb 638
8f57d478 639 port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
c5ee6feb
MW
640 port->config.nfc_credits |= nfc_credits;
641
520b6702 642 return tb_port_write(port, &port->config.nfc_credits,
8f57d478 643 TB_CFG_PORT, ADP_CS_4, 1);
520b6702
AN
644}
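/*
 * Example (illustrative): reserve three NFC credits for a path and hand
 * them back when the path is torn down. As described above, a negative
 * value removes credits:
 *
 *	ret = tb_port_add_nfc_credits(port, 3);
 *	if (ret)
 *		return ret;
 *	...
 *	tb_port_add_nfc_credits(port, -3);
 */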
645
646/**
647 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
5c6b471b
MW
648 * @port: Port whose counters to clear
649 * @counter: Counter index to clear
520b6702
AN
650 *
651 * Return: Returns 0 on success or an error code on failure.
652 */
653int tb_port_clear_counter(struct tb_port *port, int counter)
654{
655 u32 zero[3] = { 0, 0, 0 };
62efe699 656 tb_port_dbg(port, "clearing counter %d\n", counter);
520b6702
AN
657 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
658}
659
b0407983
MW
660/**
661 * tb_port_unlock() - Unlock downstream port
662 * @port: Port to unlock
663 *
664 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
665 * downstream router accessible for CM.
666 */
667int tb_port_unlock(struct tb_port *port)
668{
669 if (tb_switch_is_icm(port->sw))
670 return 0;
671 if (!tb_port_is_null(port))
672 return -EINVAL;
673 if (tb_switch_is_usb4(port->sw))
674 return usb4_port_unlock(port);
675 return 0;
676}
677
341d4518
MW
678static int __tb_port_enable(struct tb_port *port, bool enable)
679{
680 int ret;
681 u32 phy;
682
683 if (!tb_port_is_null(port))
684 return -EINVAL;
685
686 ret = tb_port_read(port, &phy, TB_CFG_PORT,
687 port->cap_phy + LANE_ADP_CS_1, 1);
688 if (ret)
689 return ret;
690
691 if (enable)
692 phy &= ~LANE_ADP_CS_1_LD;
693 else
694 phy |= LANE_ADP_CS_1_LD;
695
696 return tb_port_write(port, &phy, TB_CFG_PORT,
697 port->cap_phy + LANE_ADP_CS_1, 1);
698}
699
700/**
701 * tb_port_enable() - Enable lane adapter
702 * @port: Port to enable (can be %NULL)
703 *
704 * This is used to enable lane 0 and 1 adapters.
705 */
706int tb_port_enable(struct tb_port *port)
707{
708 return __tb_port_enable(port, true);
709}
710
711/**
712 * tb_port_disable() - Disable lane adapter
713 * @port: Port to disable (can be %NULL)
714 *
715 * This is used to disable lane 0 and 1 adapters.
716 */
717int tb_port_disable(struct tb_port *port)
718{
719 return __tb_port_enable(port, false);
720}
721
47ba5ae4 722/*
a25c8b2f
AN
723 * tb_init_port() - initialize a port
724 *
725 * This is a helper method for tb_switch_alloc. Does not check or initialize
726 * any downstream switches.
727 *
728 * Return: Returns 0 on success or an error code on failure.
729 */
343fcb8c 730static int tb_init_port(struct tb_port *port)
a25c8b2f
AN
731{
732 int res;
9da672a4 733 int cap;
343fcb8c 734
fb7a89ad
SM
735 INIT_LIST_HEAD(&port->list);
736
737 /* Control adapter does not have configuration space */
738 if (!port->port)
739 return 0;
740
a25c8b2f 741 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
d94dcbb1
MW
742 if (res) {
743 if (res == -ENODEV) {
744 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
745 port->port);
8824d19b 746 port->disabled = true;
d94dcbb1
MW
747 return 0;
748 }
a25c8b2f 749 return res;
d94dcbb1 750 }
a25c8b2f 751
9da672a4 752 /* Port 0 is the switch itself and has no PHY. */
fb7a89ad 753 if (port->config.type == TB_TYPE_PORT) {
da2da04b 754 cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
9da672a4
AN
755
756 if (cap > 0)
757 port->cap_phy = cap;
758 else
759 tb_port_WARN(port, "non switch port without a PHY\n");
b0407983
MW
760
761 cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
762 if (cap > 0)
763 port->cap_usb4 = cap;
56ad3aef
MW
764
765 /*
766 * For USB4 ports the buffers allocated for the control path
767 * can be read from the path config space. For legacy
768 * devices we use a hard-coded value.
769 */
770 if (tb_switch_is_usb4(port->sw)) {
771 struct tb_regs_hop hop;
772
773 if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
774 port->ctl_credits = hop.initial_credits;
775 }
776 if (!port->ctl_credits)
777 port->ctl_credits = 2;
778
fb7a89ad 779 } else {
56183c88
MW
780 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
781 if (cap > 0)
782 port->cap_adap = cap;
9da672a4
AN
783 }
784
56ad3aef
MW
785 port->total_credits =
786 (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
787 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
788
789 tb_dump_port(port->sw->tb, port);
a25c8b2f 790 return 0;
a25c8b2f
AN
791}
792
0b2863ac
MW
793static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
794 int max_hopid)
795{
796 int port_max_hopid;
797 struct ida *ida;
798
799 if (in) {
800 port_max_hopid = port->config.max_in_hop_id;
801 ida = &port->in_hopids;
802 } else {
803 port_max_hopid = port->config.max_out_hop_id;
804 ida = &port->out_hopids;
805 }
806
12676423
MW
807 /*
808 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
809 * reserved.
810 */
a3cfebdc 811 if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
0b2863ac
MW
812 min_hopid = TB_PATH_MIN_HOPID;
813
814 if (max_hopid < 0 || max_hopid > port_max_hopid)
815 max_hopid = port_max_hopid;
816
817 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
818}
819
820/**
821 * tb_port_alloc_in_hopid() - Allocate input HopID from port
822 * @port: Port to allocate HopID for
823 * @min_hopid: Minimum acceptable input HopID
824 * @max_hopid: Maximum acceptable input HopID
825 *
826 * Return: HopID between @min_hopid and @max_hopid or negative errno in
827 * case of error.
828 */
829int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
830{
831 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
832}
833
834/**
835 * tb_port_alloc_out_hopid() - Allocate output HopID from port
836 * @port: Port to allocate HopID for
837 * @min_hopid: Minimum acceptable output HopID
838 * @max_hopid: Maximum acceptable output HopID
839 *
840 * Return: HopID between @min_hopid and @max_hopid or negative errno in
841 * case of error.
842 */
843int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
844{
845 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
846}
847
848/**
849 * tb_port_release_in_hopid() - Release allocated input HopID from port
850 * @port: Port whose HopID to release
851 * @hopid: HopID to release
852 */
853void tb_port_release_in_hopid(struct tb_port *port, int hopid)
854{
855 ida_simple_remove(&port->in_hopids, hopid);
856}
857
858/**
859 * tb_port_release_out_hopid() - Release allocated output HopID from port
860 * @port: Port whose HopID to release
861 * @hopid: HopID to release
862 */
863void tb_port_release_out_hopid(struct tb_port *port, int hopid)
864{
865 ida_simple_remove(&port->out_hopids, hopid);
866}
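/*
 * Example (sketch): path setup pairs every HopID allocation with a
 * release on the error/teardown path:
 *
 *	hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 *
 * Passing -1 as max_hopid means "use the port maximum", see
 * tb_port_alloc_hopid() above.
 */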
867
69eb79f7
MW
868static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
869 const struct tb_switch *sw)
870{
871 u64 mask = (1ULL << parent->config.depth * 8) - 1;
872 return (tb_route(parent) & mask) == (tb_route(sw) & mask);
873}
874
fb19fac1
MW
875/**
876 * tb_next_port_on_path() - Return next port for given port on a path
877 * @start: Start port of the walk
878 * @end: End port of the walk
879 * @prev: Previous port (%NULL if this is the first)
880 *
881 * This function can be used to walk from one port to another if they
882 * are connected through zero or more switches. If @prev is a dual
883 * link port, the function follows that link and returns the other end
884 * of that same link.
885 *
886 * If the @end port has been reached, return %NULL.
887 *
888 * Domain tb->lock must be held when this function is called.
889 */
890struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
891 struct tb_port *prev)
892{
893 struct tb_port *next;
894
895 if (!prev)
896 return start;
897
898 if (prev->sw == end->sw) {
899 if (prev == end)
900 return NULL;
901 return end;
902 }
903
69eb79f7
MW
904 if (tb_switch_is_reachable(prev->sw, end->sw)) {
905 next = tb_port_at(tb_route(end->sw), prev->sw);
906 /* Walk down the topology if next == prev */
fb19fac1 907 if (prev->remote &&
69eb79f7 908 (next == prev || next->dual_link_port == prev))
fb19fac1 909 next = prev->remote;
fb19fac1
MW
910 } else {
911 if (tb_is_upstream_port(prev)) {
912 next = prev->remote;
913 } else {
914 next = tb_upstream_port(prev->sw);
915 /*
916 * Keep the same link if prev and next are both
917 * dual link ports.
918 */
919 if (next->dual_link_port &&
920 next->link_nr != prev->link_nr) {
921 next = next->dual_link_port;
922 }
923 }
924 }
925
69eb79f7 926 return next != prev ? next : NULL;
fb19fac1
MW
927}
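/*
 * Example (sketch): callers usually iterate the whole path with a loop
 * like this; @p visits @src, every port in between and finally @dst:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p))) {
 *		...
 *	}
 */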
928
5b7b8c0a
MW
929/**
930 * tb_port_get_link_speed() - Get current link speed
931 * @port: Port to check (USB4 or CIO)
932 *
933 * Returns link speed in Gb/s or negative errno in case of failure.
934 */
935int tb_port_get_link_speed(struct tb_port *port)
91c0c120
MW
936{
937 u32 val, speed;
938 int ret;
939
940 if (!port->cap_phy)
941 return -EINVAL;
942
943 ret = tb_port_read(port, &val, TB_CFG_PORT,
944 port->cap_phy + LANE_ADP_CS_1, 1);
945 if (ret)
946 return ret;
947
948 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
949 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
950 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
951}
952
4210d50f
IH
953/**
954 * tb_port_get_link_width() - Get current link width
955 * @port: Port to check (USB4 or CIO)
956 *
957 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
958 * or negative errno in case of failure.
959 */
960int tb_port_get_link_width(struct tb_port *port)
91c0c120
MW
961{
962 u32 val;
963 int ret;
964
965 if (!port->cap_phy)
966 return -EINVAL;
967
968 ret = tb_port_read(port, &val, TB_CFG_PORT,
969 port->cap_phy + LANE_ADP_CS_1, 1);
970 if (ret)
971 return ret;
972
973 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
974 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
975}
976
977static bool tb_port_is_width_supported(struct tb_port *port, int width)
978{
979 u32 phy, widths;
980 int ret;
981
982 if (!port->cap_phy)
983 return false;
984
985 ret = tb_port_read(port, &phy, TB_CFG_PORT,
986 port->cap_phy + LANE_ADP_CS_0, 1);
987 if (ret)
e9d0e751 988 return false;
91c0c120
MW
989
990 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
991 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
992
993 return !!(widths & width);
994}
995
996static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
997{
998 u32 val;
999 int ret;
1000
1001 if (!port->cap_phy)
1002 return -EINVAL;
1003
1004 ret = tb_port_read(port, &val, TB_CFG_PORT,
1005 port->cap_phy + LANE_ADP_CS_1, 1);
1006 if (ret)
1007 return ret;
1008
1009 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
1010 switch (width) {
1011 case 1:
1012 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
1013 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1014 break;
1015 case 2:
1016 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
1017 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1018 break;
1019 default:
1020 return -EINVAL;
1021 }
1022
1023 val |= LANE_ADP_CS_1_LB;
1024
1025 return tb_port_write(port, &val, TB_CFG_PORT,
1026 port->cap_phy + LANE_ADP_CS_1, 1);
1027}
1028
5cc0df9c
IH
1029/**
1030 * tb_port_lane_bonding_enable() - Enable bonding on port
1031 * @port: port to enable
1032 *
e7051bea
MW
1033 * Enable bonding by setting the link width of the port and the other
1034 * port in case of dual link port. Does not wait for the link to
1035 * actually reach the bonded state so the caller needs to call
1036 * tb_port_wait_for_link_width() before enabling any paths through the
1037 * link to make sure the link is in the expected state.
5cc0df9c
IH
1038 *
1039 * Return: %0 in case of success and negative errno in case of error
1040 */
1041int tb_port_lane_bonding_enable(struct tb_port *port)
91c0c120
MW
1042{
1043 int ret;
1044
1045 /*
1046 * Enable lane bonding for both links if not already enabled by
1047 * for example the boot firmware.
1048 */
1049 ret = tb_port_get_link_width(port);
1050 if (ret == 1) {
1051 ret = tb_port_set_link_width(port, 2);
1052 if (ret)
1053 return ret;
1054 }
1055
1056 ret = tb_port_get_link_width(port->dual_link_port);
1057 if (ret == 1) {
1058 ret = tb_port_set_link_width(port->dual_link_port, 2);
1059 if (ret) {
1060 tb_port_set_link_width(port, 1);
1061 return ret;
1062 }
1063 }
1064
1065 port->bonded = true;
1066 port->dual_link_port->bonded = true;
1067
1068 return 0;
1069}
1070
5cc0df9c
IH
1071/**
1072 * tb_port_lane_bonding_disable() - Disable bonding on port
1073 * @port: port to disable
1074 *
1075 * Disable bonding by setting the link width of the port and the
1076 * other port in case of dual link port.
1077 *
1078 */
1079void tb_port_lane_bonding_disable(struct tb_port *port)
91c0c120
MW
1080{
1081 port->dual_link_port->bonded = false;
1082 port->bonded = false;
1083
1084 tb_port_set_link_width(port->dual_link_port, 1);
1085 tb_port_set_link_width(port, 1);
1086}
1087
e7051bea
MW
1088/**
1089 * tb_port_wait_for_link_width() - Wait until link reaches specific width
1090 * @port: Port to wait for
1091 * @width: Expected link width (%1 or %2)
1092 * @timeout_msec: Timeout in ms how long to wait
1093 *
1094 * Should be used after both ends of the link have been bonded (or
1095 * bonding has been disabled) to wait until the link actually reaches
1096 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
1097 * within the given timeout, %0 if it did.
1098 */
1099int tb_port_wait_for_link_width(struct tb_port *port, int width,
1100 int timeout_msec)
1101{
1102 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1103 int ret;
1104
1105 do {
1106 ret = tb_port_get_link_width(port);
1107 if (ret < 0)
1108 return ret;
1109 else if (ret == width)
1110 return 0;
1111
1112 usleep_range(1000, 2000);
1113 } while (ktime_before(ktime_get(), timeout));
1114
1115 return -ETIMEDOUT;
1116}
1117
69fea377
MW
1118static int tb_port_do_update_credits(struct tb_port *port)
1119{
1120 u32 nfc_credits;
1121 int ret;
1122
1123 ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
1124 if (ret)
1125 return ret;
1126
1127 if (nfc_credits != port->config.nfc_credits) {
1128 u32 total;
1129
1130 total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
1131 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
1132
1133 tb_port_dbg(port, "total credits changed %u -> %u\n",
1134 port->total_credits, total);
1135
1136 port->config.nfc_credits = nfc_credits;
1137 port->total_credits = total;
1138 }
1139
1140 return 0;
1141}
1142
1143/**
1144 * tb_port_update_credits() - Re-read port total credits
1145 * @port: Port to update
1146 *
1147 * After the link is bonded (or bonding was disabled) the port total
1148 * credits may change, so this function needs to be called to re-read
1149 * the credits. Updates also the second lane adapter.
1150 */
1151int tb_port_update_credits(struct tb_port *port)
1152{
1153 int ret;
1154
1155 ret = tb_port_do_update_credits(port);
1156 if (ret)
1157 return ret;
1158 return tb_port_do_update_credits(port->dual_link_port);
1159}
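/*
 * Example (sketch, assuming "up" and "down" are the lane 0 adapters on
 * the two ends of the link; error handling omitted): the usual bonding
 * sequence combines the helpers above:
 *
 *	ret = tb_port_lane_bonding_enable(up);
 *	ret = tb_port_lane_bonding_enable(down);
 *	ret = tb_port_wait_for_link_width(down, 2, 100);
 *	tb_port_update_credits(down);
 *	tb_port_update_credits(up);
 */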
1160
fdb0887c
MW
1161static int tb_port_start_lane_initialization(struct tb_port *port)
1162{
1163 int ret;
1164
1165 if (tb_switch_is_usb4(port->sw))
1166 return 0;
1167
1168 ret = tb_lc_start_lane_initialization(port);
1169 return ret == -EINVAL ? 0 : ret;
1170}
1171
3fb10ea4
RM
1172/*
1173 * Returns true if the port had something (router, XDomain) connected
1174 * before suspend.
1175 */
1176static bool tb_port_resume(struct tb_port *port)
1177{
1178 bool has_remote = tb_port_has_remote(port);
1179
1180 if (port->usb4) {
1181 usb4_port_device_resume(port->usb4);
1182 } else if (!has_remote) {
1183 /*
1184 * For disconnected downstream lane adapters start lane
1185 * initialization now so we detect future connects.
1186 *
1187 * For XDomain start the lane initialization now so the
1188 * link gets re-established.
1189 *
1190 * This is only needed for non-USB4 ports.
1191 */
1192 if (!tb_is_upstream_port(port) || port->xdomain)
1193 tb_port_start_lane_initialization(port);
1194 }
1195
1196 return has_remote || port->xdomain;
1197}
1198
e78db6f0
MW
1199/**
1200 * tb_port_is_enabled() - Is the adapter port enabled
1201 * @port: Port to check
1202 */
1203bool tb_port_is_enabled(struct tb_port *port)
1204{
1205 switch (port->config.type) {
1206 case TB_TYPE_PCIE_UP:
1207 case TB_TYPE_PCIE_DOWN:
1208 return tb_pci_port_is_enabled(port);
1209
4f807e47
MW
1210 case TB_TYPE_DP_HDMI_IN:
1211 case TB_TYPE_DP_HDMI_OUT:
1212 return tb_dp_port_is_enabled(port);
1213
e6f81858
RM
1214 case TB_TYPE_USB3_UP:
1215 case TB_TYPE_USB3_DOWN:
1216 return tb_usb3_port_is_enabled(port);
1217
e78db6f0
MW
1218 default:
1219 return false;
1220 }
1221}
1222
e6f81858
RM
1223/**
1224 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1225 * @port: USB3 adapter port to check
1226 */
1227bool tb_usb3_port_is_enabled(struct tb_port *port)
1228{
1229 u32 data;
1230
1231 if (tb_port_read(port, &data, TB_CFG_PORT,
1232 port->cap_adap + ADP_USB3_CS_0, 1))
1233 return false;
1234
1235 return !!(data & ADP_USB3_CS_0_PE);
1236}
1237
1238/**
1239 * tb_usb3_port_enable() - Enable USB3 adapter port
1240 * @port: USB3 adapter port to enable
1241 * @enable: Enable/disable the USB3 adapter
1242 */
1243int tb_usb3_port_enable(struct tb_port *port, bool enable)
1244{
1245 u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1246 : ADP_USB3_CS_0_V;
1247
1248 if (!port->cap_adap)
1249 return -ENXIO;
1250 return tb_port_write(port, &word, TB_CFG_PORT,
1251 port->cap_adap + ADP_USB3_CS_0, 1);
1252}
1253
0414bec5
MW
1254/**
1255 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1256 * @port: PCIe port to check
1257 */
1258bool tb_pci_port_is_enabled(struct tb_port *port)
1259{
1260 u32 data;
1261
778bfca3
MW
1262 if (tb_port_read(port, &data, TB_CFG_PORT,
1263 port->cap_adap + ADP_PCIE_CS_0, 1))
0414bec5
MW
1264 return false;
1265
778bfca3 1266 return !!(data & ADP_PCIE_CS_0_PE);
0414bec5
MW
1267}
1268
93f36ade
MW
1269/**
1270 * tb_pci_port_enable() - Enable PCIe adapter port
1271 * @port: PCIe port to enable
1272 * @enable: Enable/disable the PCIe adapter
1273 */
1274int tb_pci_port_enable(struct tb_port *port, bool enable)
1275{
778bfca3 1276 u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
93f36ade
MW
1277 if (!port->cap_adap)
1278 return -ENXIO;
778bfca3
MW
1279 return tb_port_write(port, &word, TB_CFG_PORT,
1280 port->cap_adap + ADP_PCIE_CS_0, 1);
93f36ade
MW
1281}
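/*
 * Example (illustrative, error handling trimmed): activating a PCIe
 * tunnel enables the adapters on both ends once the paths are set up:
 *
 *	ret = tb_pci_port_enable(up, true);
 *	if (ret)
 *		return ret;
 *	return tb_pci_port_enable(down, true);
 */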
1282
4f807e47
MW
1283/**
1284 * tb_dp_port_hpd_is_active() - Is HPD already active
1285 * @port: DP out port to check
1286 *
1287 * Checks if the DP OUT adapter port has the HDP bit already set.
1288 */
1289int tb_dp_port_hpd_is_active(struct tb_port *port)
1290{
1291 u32 data;
1292 int ret;
1293
98176380
MW
1294 ret = tb_port_read(port, &data, TB_CFG_PORT,
1295 port->cap_adap + ADP_DP_CS_2, 1);
4f807e47
MW
1296 if (ret)
1297 return ret;
1298
98176380 1299 return !!(data & ADP_DP_CS_2_HDP);
4f807e47
MW
1300}
1301
1302/**
1303 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1304 * @port: Port to clear HPD
1305 *
1306 * If the DP IN port has HDP set, this function can be used to clear it.
1307 */
1308int tb_dp_port_hpd_clear(struct tb_port *port)
1309{
1310 u32 data;
1311 int ret;
1312
98176380
MW
1313 ret = tb_port_read(port, &data, TB_CFG_PORT,
1314 port->cap_adap + ADP_DP_CS_3, 1);
4f807e47
MW
1315 if (ret)
1316 return ret;
1317
98176380
MW
1318 data |= ADP_DP_CS_3_HDPC;
1319 return tb_port_write(port, &data, TB_CFG_PORT,
1320 port->cap_adap + ADP_DP_CS_3, 1);
4f807e47
MW
1321}
1322
1323/**
1324 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1325 * @port: DP IN/OUT port to set hops
1326 * @video: Video Hop ID
1327 * @aux_tx: AUX TX Hop ID
1328 * @aux_rx: AUX RX Hop ID
1329 *
e5bb88e9
MW
1330 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
1331 * router DP adapters too but does not program the values as the fields
1332 * are read-only.
4f807e47
MW
1333 */
1334int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1335 unsigned int aux_tx, unsigned int aux_rx)
1336{
1337 u32 data[2];
1338 int ret;
1339
e5bb88e9
MW
1340 if (tb_switch_is_usb4(port->sw))
1341 return 0;
1342
98176380
MW
1343 ret = tb_port_read(port, data, TB_CFG_PORT,
1344 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1345 if (ret)
1346 return ret;
1347
98176380
MW
1348 data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
1349 data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1350 data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
4f807e47 1351
98176380
MW
1352 data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1353 ADP_DP_CS_0_VIDEO_HOPID_MASK;
1354 data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1355 data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1356 ADP_DP_CS_1_AUX_RX_HOPID_MASK;
4f807e47 1357
98176380
MW
1358 return tb_port_write(port, data, TB_CFG_PORT,
1359 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1360}
1361
1362/**
1363 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1364 * @port: DP adapter port to check
1365 */
1366bool tb_dp_port_is_enabled(struct tb_port *port)
1367{
fd5c46b7 1368 u32 data[2];
4f807e47 1369
98176380 1370 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
fd5c46b7 1371 ARRAY_SIZE(data)))
4f807e47
MW
1372 return false;
1373
98176380 1374 return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
4f807e47
MW
1375}
1376
1377/**
1378 * tb_dp_port_enable() - Enables/disables DP paths of a port
1379 * @port: DP IN/OUT port
1380 * @enable: Enable/disable DP path
1381 *
1382 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1383 * calling this function.
1384 */
1385int tb_dp_port_enable(struct tb_port *port, bool enable)
1386{
fd5c46b7 1387 u32 data[2];
4f807e47
MW
1388 int ret;
1389
98176380
MW
1390 ret = tb_port_read(port, data, TB_CFG_PORT,
1391 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1392 if (ret)
1393 return ret;
1394
1395 if (enable)
98176380 1396 data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
4f807e47 1397 else
98176380 1398 data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
4f807e47 1399
98176380
MW
1400 return tb_port_write(port, data, TB_CFG_PORT,
1401 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1402}
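/*
 * Example (sketch for non-USB4 routers): the DP adapter is programmed
 * in this order, clearing HPD first, then the HopIDs, then the paths:
 *
 *	tb_dp_port_hpd_clear(in);
 *	tb_dp_port_set_hops(in, video_hopid, aux_tx_hopid, aux_rx_hopid);
 *	tb_dp_port_enable(in, true);
 */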
1403
a25c8b2f
AN
1404/* switch utility functions */
1405
b0407983
MW
1406static const char *tb_switch_generation_name(const struct tb_switch *sw)
1407{
1408 switch (sw->generation) {
1409 case 1:
1410 return "Thunderbolt 1";
1411 case 2:
1412 return "Thunderbolt 2";
1413 case 3:
1414 return "Thunderbolt 3";
1415 case 4:
1416 return "USB4";
1417 default:
1418 return "Unknown";
1419 }
1420}
1421
1422static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
a25c8b2f 1423{
b0407983
MW
1424 const struct tb_regs_switch_header *regs = &sw->config;
1425
1426 tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1427 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1428 regs->revision, regs->thunderbolt_version);
1429 tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
daa5140f
MW
1430 tb_dbg(tb, " Config:\n");
1431 tb_dbg(tb,
a25c8b2f 1432 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
b0407983
MW
1433 regs->upstream_port_number, regs->depth,
1434 (((u64) regs->route_hi) << 32) | regs->route_lo,
1435 regs->enabled, regs->plug_events_delay);
daa5140f 1436 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
b0407983 1437 regs->__unknown1, regs->__unknown4);
a25c8b2f
AN
1438}
1439
23dd5bb4 1440/**
2c2a2327 1441 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
356b6c4e 1442 * @sw: Switch to reset
23dd5bb4
AN
1443 *
1444 * Return: Returns 0 on success or an error code on failure.
1445 */
356b6c4e 1446int tb_switch_reset(struct tb_switch *sw)
23dd5bb4
AN
1447{
1448 struct tb_cfg_result res;
356b6c4e
MW
1449
1450 if (sw->generation > 1)
1451 return 0;
1452
1453 tb_sw_dbg(sw, "resetting switch\n");
1454
1455 res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1456 TB_CFG_SWITCH, 2, 2);
23dd5bb4
AN
1457 if (res.err)
1458 return res.err;
bda83aec 1459 res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
23dd5bb4
AN
1460 if (res.err > 0)
1461 return -EIO;
1462 return res.err;
1463}
1464
1639664f
GF
1465/**
1466 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
1467 * @sw: Router to read the offset value from
1468 * @offset: Offset in the router config space to read from
1469 * @bit: Bit mask in the offset to wait for
1470 * @value: Value of the bits to wait for
1471 * @timeout_msec: Timeout in ms how long to wait
1472 *
1473 * Wait till the specified bits in specified offset reach specified value.
1474 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
1475 * within the given timeout or a negative errno in case of failure.
1476 */
1477int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
1478 u32 value, int timeout_msec)
1479{
1480 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1481
1482 do {
1483 u32 val;
1484 int ret;
1485
1486 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
1487 if (ret)
1488 return ret;
1489
1490 if ((val & bit) == value)
1491 return 0;
1492
1493 usleep_range(50, 100);
1494 } while (ktime_before(ktime_get(), timeout));
1495
1496 return -ETIMEDOUT;
1497}
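/*
 * Example (illustrative only; EXAMPLE_OFFSET and EXAMPLE_BIT are
 * placeholder names, not real registers): wait up to 100 ms for a bit
 * in the router config space to clear:
 *
 *	ret = tb_switch_wait_for_bit(sw, EXAMPLE_OFFSET, EXAMPLE_BIT, 0, 100);
 *	if (ret)
 *		return ret;
 */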
1498
47ba5ae4 1499/*
ca389f71
AN
1500 * tb_plug_events_active() - enable/disable plug events on a switch
1501 *
1502 * Also configures a sane plug_events_delay of 255ms.
1503 *
1504 * Return: Returns 0 on success or an error code on failure.
1505 */
1506static int tb_plug_events_active(struct tb_switch *sw, bool active)
1507{
1508 u32 data;
1509 int res;
1510
5cb6ed31 1511 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
bfe778ac
MW
1512 return 0;
1513
ca389f71
AN
1514 sw->config.plug_events_delay = 0xff;
1515 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1516 if (res)
1517 return res;
1518
1519 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1520 if (res)
1521 return res;
1522
1523 if (active) {
1524 data = data & 0xFFFFFF83;
1525 switch (sw->config.device_id) {
1d111406
LW
1526 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1527 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1528 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
ca389f71
AN
1529 break;
1530 default:
30a4eca6
MW
1531 /*
1532 * Skip Alpine Ridge, it needs to have vendor
1533 * specific USB hotplug event enabled for the
1534 * internal xHCI to work.
1535 */
1536 if (!tb_switch_is_alpine_ridge(sw))
1537 data |= TB_PLUG_EVENTS_USB_DISABLE;
ca389f71
AN
1538 }
1539 } else {
1540 data = data | 0x7c;
1541 }
1542 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1543 sw->cap_plug_events + 1, 1);
1544}
1545
f67cf491
MW
1546static ssize_t authorized_show(struct device *dev,
1547 struct device_attribute *attr,
1548 char *buf)
1549{
1550 struct tb_switch *sw = tb_to_switch(dev);
1551
1552 return sprintf(buf, "%u\n", sw->authorized);
1553}
1554
3da88be2
MW
1555static int disapprove_switch(struct device *dev, void *not_used)
1556{
1651d9e7 1557 char *envp[] = { "AUTHORIZED=0", NULL };
3da88be2
MW
1558 struct tb_switch *sw;
1559
1560 sw = tb_to_switch(dev);
1561 if (sw && sw->authorized) {
1562 int ret;
1563
1564 /* First children */
1565 ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1566 if (ret)
1567 return ret;
1568
1569 ret = tb_domain_disapprove_switch(sw->tb, sw);
1570 if (ret)
1571 return ret;
1572
1573 sw->authorized = 0;
1651d9e7 1574 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
3da88be2
MW
1575 }
1576
1577 return 0;
1578}
1579
f67cf491
MW
1580static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1581{
1651d9e7 1582 char envp_string[13];
f67cf491 1583 int ret = -EINVAL;
1651d9e7 1584 char *envp[] = { envp_string, NULL };
f67cf491 1585
09f11b6c
MW
1586 if (!mutex_trylock(&sw->tb->lock))
1587 return restart_syscall();
f67cf491 1588
3da88be2 1589 if (!!sw->authorized == !!val)
f67cf491
MW
1590 goto unlock;
1591
1592 switch (val) {
3da88be2
MW
1593 /* Disapprove switch */
1594 case 0:
1595 if (tb_route(sw)) {
1596 ret = disapprove_switch(&sw->dev, NULL);
1597 goto unlock;
1598 }
1599 break;
1600
f67cf491
MW
1601 /* Approve switch */
1602 case 1:
1603 if (sw->key)
1604 ret = tb_domain_approve_switch_key(sw->tb, sw);
1605 else
1606 ret = tb_domain_approve_switch(sw->tb, sw);
1607 break;
1608
1609 /* Challenge switch */
1610 case 2:
1611 if (sw->key)
1612 ret = tb_domain_challenge_switch_key(sw->tb, sw);
1613 break;
1614
1615 default:
1616 break;
1617 }
1618
1619 if (!ret) {
1620 sw->authorized = val;
1651d9e7
RJ
1621 /*
1622 * Notify the status change to userspace, informing the new
1623 * value of /sys/bus/thunderbolt/devices/.../authorized.
1624 */
1625 sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
1626 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
f67cf491
MW
1627 }
1628
1629unlock:
09f11b6c 1630 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1631 return ret;
1632}
1633
1634static ssize_t authorized_store(struct device *dev,
1635 struct device_attribute *attr,
1636 const char *buf, size_t count)
1637{
1638 struct tb_switch *sw = tb_to_switch(dev);
1639 unsigned int val;
1640 ssize_t ret;
1641
1642 ret = kstrtouint(buf, 0, &val);
1643 if (ret)
1644 return ret;
1645 if (val > 2)
1646 return -EINVAL;
1647
4f7c2e0d 1648 pm_runtime_get_sync(&sw->dev);
f67cf491 1649 ret = tb_switch_set_authorized(sw, val);
4f7c2e0d
MW
1650 pm_runtime_mark_last_busy(&sw->dev);
1651 pm_runtime_put_autosuspend(&sw->dev);
f67cf491
MW
1652
1653 return ret ? ret : count;
1654}
1655static DEVICE_ATTR_RW(authorized);
1656
14862ee3
YB
1657static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1658 char *buf)
1659{
1660 struct tb_switch *sw = tb_to_switch(dev);
1661
1662 return sprintf(buf, "%u\n", sw->boot);
1663}
1664static DEVICE_ATTR_RO(boot);
1665
bfe778ac
MW
1666static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1667 char *buf)
1668{
1669 struct tb_switch *sw = tb_to_switch(dev);
ca389f71 1670
bfe778ac
MW
1671 return sprintf(buf, "%#x\n", sw->device);
1672}
1673static DEVICE_ATTR_RO(device);
1674
72ee3390
MW
1675static ssize_t
1676device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1677{
1678 struct tb_switch *sw = tb_to_switch(dev);
1679
1680 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1681}
1682static DEVICE_ATTR_RO(device_name);
1683
b406357c
CK
1684static ssize_t
1685generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1686{
1687 struct tb_switch *sw = tb_to_switch(dev);
1688
1689 return sprintf(buf, "%u\n", sw->generation);
1690}
1691static DEVICE_ATTR_RO(generation);
1692
f67cf491
MW
1693static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1694 char *buf)
1695{
1696 struct tb_switch *sw = tb_to_switch(dev);
1697 ssize_t ret;
1698
09f11b6c
MW
1699 if (!mutex_trylock(&sw->tb->lock))
1700 return restart_syscall();
f67cf491
MW
1701
1702 if (sw->key)
1703 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1704 else
1705 ret = sprintf(buf, "\n");
1706
09f11b6c 1707 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1708 return ret;
1709}
1710
1711static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1712 const char *buf, size_t count)
1713{
1714 struct tb_switch *sw = tb_to_switch(dev);
1715 u8 key[TB_SWITCH_KEY_SIZE];
1716 ssize_t ret = count;
e545f0d8 1717 bool clear = false;
f67cf491 1718
e545f0d8
BY
1719 if (!strcmp(buf, "\n"))
1720 clear = true;
1721 else if (hex2bin(key, buf, sizeof(key)))
f67cf491
MW
1722 return -EINVAL;
1723
09f11b6c
MW
1724 if (!mutex_trylock(&sw->tb->lock))
1725 return restart_syscall();
f67cf491
MW
1726
1727 if (sw->authorized) {
1728 ret = -EBUSY;
1729 } else {
1730 kfree(sw->key);
e545f0d8
BY
1731 if (clear) {
1732 sw->key = NULL;
1733 } else {
1734 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1735 if (!sw->key)
1736 ret = -ENOMEM;
1737 }
f67cf491
MW
1738 }
1739
09f11b6c 1740 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1741 return ret;
1742}
0956e411 1743static DEVICE_ATTR(key, 0600, key_show, key_store);
f67cf491 1744
91c0c120
MW
1745static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1746 char *buf)
1747{
1748 struct tb_switch *sw = tb_to_switch(dev);
1749
1750 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1751}
1752
1753/*
1754 * Currently all lanes must run at the same speed but we expose here
1755 * both directions to allow possible asymmetric links in the future.
1756 */
1757static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1758static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1759
1760static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1761 char *buf)
1762{
1763 struct tb_switch *sw = tb_to_switch(dev);
1764
1765 return sprintf(buf, "%u\n", sw->link_width);
1766}
1767
1768/*
1769 * Currently the link has the same number of lanes in both directions (1 or 2)
1770 * but we expose them separately to allow possible asymmetric links in the future.
1771 */
1772static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1773static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1774
e6b245cc
MW
1775static ssize_t nvm_authenticate_show(struct device *dev,
1776 struct device_attribute *attr, char *buf)
1777{
1778 struct tb_switch *sw = tb_to_switch(dev);
1779 u32 status;
1780
1781 nvm_get_auth_status(sw, &status);
1782 return sprintf(buf, "%#x\n", status);
1783}
1784
1cb36293
ML
1785static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1786 bool disconnect)
e6b245cc
MW
1787{
1788 struct tb_switch *sw = tb_to_switch(dev);
1cbf680f 1789 int val, ret;
e6b245cc 1790
4f7c2e0d
MW
1791 pm_runtime_get_sync(&sw->dev);
1792
1793 if (!mutex_trylock(&sw->tb->lock)) {
1794 ret = restart_syscall();
1795 goto exit_rpm;
1796 }
e6b245cc
MW
1797
1798 /* If NVMem devices are not yet added */
1799 if (!sw->nvm) {
1800 ret = -EAGAIN;
1801 goto exit_unlock;
1802 }
1803
4b794f80 1804 ret = kstrtoint(buf, 10, &val);
e6b245cc
MW
1805 if (ret)
1806 goto exit_unlock;
1807
1808 /* Always clear the authentication status */
1809 nvm_clear_auth_status(sw);
1810
4b794f80 1811 if (val > 0) {
1cbf680f
MW
1812 if (val == AUTHENTICATE_ONLY) {
1813 if (disconnect)
4b794f80 1814 ret = -EINVAL;
1cbf680f
MW
1815 else
1816 ret = nvm_authenticate(sw, true);
1817 } else {
1818 if (!sw->nvm->flushed) {
1819 if (!sw->nvm->buf) {
1820 ret = -EINVAL;
1821 goto exit_unlock;
1822 }
1823
1824 ret = nvm_validate_and_write(sw);
1825 if (ret || val == WRITE_ONLY)
1826 goto exit_unlock;
4b794f80 1827 }
1cbf680f
MW
1828 if (val == WRITE_AND_AUTHENTICATE) {
1829 if (disconnect)
1830 ret = tb_lc_force_power(sw);
1831 else
1832 ret = nvm_authenticate(sw, false);
1cb36293 1833 }
4b794f80 1834 }
e6b245cc
MW
1835 }
1836
1837exit_unlock:
09f11b6c 1838 mutex_unlock(&sw->tb->lock);
4f7c2e0d
MW
1839exit_rpm:
1840 pm_runtime_mark_last_busy(&sw->dev);
1841 pm_runtime_put_autosuspend(&sw->dev);
e6b245cc 1842
1cb36293
ML
1843 return ret;
1844}
1845
1846static ssize_t nvm_authenticate_store(struct device *dev,
1847 struct device_attribute *attr, const char *buf, size_t count)
1848{
1849 int ret = nvm_authenticate_sysfs(dev, buf, false);
e6b245cc
MW
1850 if (ret)
1851 return ret;
1852 return count;
1853}
1854static DEVICE_ATTR_RW(nvm_authenticate);
1855
1cb36293
ML
1856static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1857 struct device_attribute *attr, char *buf)
1858{
1859 return nvm_authenticate_show(dev, attr, buf);
1860}
1861
1862static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
1863 struct device_attribute *attr, const char *buf, size_t count)
1864{
1865 int ret;
1866
1867 ret = nvm_authenticate_sysfs(dev, buf, true);
1868 return ret ? ret : count;
1869}
1870static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
1871
e6b245cc
MW
1872static ssize_t nvm_version_show(struct device *dev,
1873 struct device_attribute *attr, char *buf)
1874{
1875 struct tb_switch *sw = tb_to_switch(dev);
1876 int ret;
1877
09f11b6c
MW
1878 if (!mutex_trylock(&sw->tb->lock))
1879 return restart_syscall();
e6b245cc
MW
1880
1881 if (sw->safe_mode)
1882 ret = -ENODATA;
1883 else if (!sw->nvm)
1884 ret = -EAGAIN;
1885 else
1886 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1887
09f11b6c 1888 mutex_unlock(&sw->tb->lock);
e6b245cc
MW
1889
1890 return ret;
1891}
1892static DEVICE_ATTR_RO(nvm_version);
1893
bfe778ac
MW
1894static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1895 char *buf)
a25c8b2f 1896{
bfe778ac 1897 struct tb_switch *sw = tb_to_switch(dev);
a25c8b2f 1898
bfe778ac
MW
1899 return sprintf(buf, "%#x\n", sw->vendor);
1900}
1901static DEVICE_ATTR_RO(vendor);
1902
72ee3390
MW
1903static ssize_t
1904vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1905{
1906 struct tb_switch *sw = tb_to_switch(dev);
1907
1908 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1909}
1910static DEVICE_ATTR_RO(vendor_name);
1911
bfe778ac
MW
1912static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1913 char *buf)
1914{
1915 struct tb_switch *sw = tb_to_switch(dev);
1916
1917 return sprintf(buf, "%pUb\n", sw->uuid);
1918}
1919static DEVICE_ATTR_RO(unique_id);
1920
1921static struct attribute *switch_attrs[] = {
f67cf491 1922 &dev_attr_authorized.attr,
14862ee3 1923 &dev_attr_boot.attr,
bfe778ac 1924 &dev_attr_device.attr,
72ee3390 1925 &dev_attr_device_name.attr,
b406357c 1926 &dev_attr_generation.attr,
f67cf491 1927 &dev_attr_key.attr,
e6b245cc 1928 &dev_attr_nvm_authenticate.attr,
1cb36293 1929 &dev_attr_nvm_authenticate_on_disconnect.attr,
e6b245cc 1930 &dev_attr_nvm_version.attr,
91c0c120
MW
1931 &dev_attr_rx_speed.attr,
1932 &dev_attr_rx_lanes.attr,
1933 &dev_attr_tx_speed.attr,
1934 &dev_attr_tx_lanes.attr,
bfe778ac 1935 &dev_attr_vendor.attr,
72ee3390 1936 &dev_attr_vendor_name.attr,
bfe778ac
MW
1937 &dev_attr_unique_id.attr,
1938 NULL,
1939};
1940
f67cf491
MW
1941static umode_t switch_attr_is_visible(struct kobject *kobj,
1942 struct attribute *attr, int n)
1943{
fff15f23 1944 struct device *dev = kobj_to_dev(kobj);
f67cf491
MW
1945 struct tb_switch *sw = tb_to_switch(dev);
1946
3cd542e6
MW
1947 if (attr == &dev_attr_authorized.attr) {
1948 if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
8e334125 1949 sw->tb->security_level == TB_SECURITY_DPONLY)
3cd542e6
MW
1950 return 0;
1951 } else if (attr == &dev_attr_device.attr) {
58f414fa
MW
1952 if (!sw->device)
1953 return 0;
1954 } else if (attr == &dev_attr_device_name.attr) {
1955 if (!sw->device_name)
1956 return 0;
1957 } else if (attr == &dev_attr_vendor.attr) {
1958 if (!sw->vendor)
1959 return 0;
1960 } else if (attr == &dev_attr_vendor_name.attr) {
1961 if (!sw->vendor_name)
1962 return 0;
1963 } else if (attr == &dev_attr_key.attr) {
f67cf491
MW
1964 if (tb_route(sw) &&
1965 sw->tb->security_level == TB_SECURITY_SECURE &&
1966 sw->security_level == TB_SECURITY_SECURE)
1967 return attr->mode;
1968 return 0;
91c0c120
MW
1969 } else if (attr == &dev_attr_rx_speed.attr ||
1970 attr == &dev_attr_rx_lanes.attr ||
1971 attr == &dev_attr_tx_speed.attr ||
1972 attr == &dev_attr_tx_lanes.attr) {
1973 if (tb_route(sw))
1974 return attr->mode;
1975 return 0;
3f415e5e 1976 } else if (attr == &dev_attr_nvm_authenticate.attr) {
b0407983 1977 if (nvm_upgradeable(sw))
3f415e5e
MW
1978 return attr->mode;
1979 return 0;
1980 } else if (attr == &dev_attr_nvm_version.attr) {
b0407983 1981 if (nvm_readable(sw))
e6b245cc
MW
1982 return attr->mode;
1983 return 0;
14862ee3
YB
1984 } else if (attr == &dev_attr_boot.attr) {
1985 if (tb_route(sw))
1986 return attr->mode;
1987 return 0;
1cb36293
ML
1988 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
1989 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
1990 return attr->mode;
1991 return 0;
f67cf491
MW
1992 }
1993
e6b245cc 1994 return sw->safe_mode ? 0 : attr->mode;
f67cf491
MW
1995}
1996
6889e00f 1997static const struct attribute_group switch_group = {
f67cf491 1998 .is_visible = switch_attr_is_visible,
bfe778ac
MW
1999 .attrs = switch_attrs,
2000};
ca389f71 2001
bfe778ac
MW
2002static const struct attribute_group *switch_groups[] = {
2003 &switch_group,
2004 NULL,
2005};
2006
2007static void tb_switch_release(struct device *dev)
2008{
2009 struct tb_switch *sw = tb_to_switch(dev);
b433d010 2010 struct tb_port *port;
bfe778ac 2011
3e136768
MW
2012 dma_port_free(sw->dma_port);
2013
b433d010 2014 tb_switch_for_each_port(sw, port) {
781e14ea
MW
2015 ida_destroy(&port->in_hopids);
2016 ida_destroy(&port->out_hopids);
0b2863ac
MW
2017 }
2018
bfe778ac 2019 kfree(sw->uuid);
72ee3390
MW
2020 kfree(sw->device_name);
2021 kfree(sw->vendor_name);
a25c8b2f 2022 kfree(sw->ports);
343fcb8c 2023 kfree(sw->drom);
f67cf491 2024 kfree(sw->key);
a25c8b2f
AN
2025 kfree(sw);
2026}
2027
2f608ba1
MW
2028static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
2029{
2030 struct tb_switch *sw = tb_to_switch(dev);
2031 const char *type;
2032
2033 if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
2034 if (add_uevent_var(env, "USB4_VERSION=1.0"))
2035 return -ENOMEM;
2036 }
2037
2038 if (!tb_route(sw)) {
2039 type = "host";
2040 } else {
2041 const struct tb_port *port;
2042 bool hub = false;
2043
2044 /* Device is hub if it has any downstream ports */
2045 tb_switch_for_each_port(sw, port) {
2046 if (!port->disabled && !tb_is_upstream_port(port) &&
2047 tb_port_is_null(port)) {
2048 hub = true;
2049 break;
2050 }
2051 }
2052
2053 type = hub ? "hub" : "device";
2054 }
2055
2056 if (add_uevent_var(env, "USB4_TYPE=%s", type))
2057 return -ENOMEM;
2058 return 0;
2059}
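
/*
 * For illustration only (not emitted verbatim by the code above): a USB4
 * hub router enumerated by this driver would typically end up with a
 * uevent environment along the lines of
 *
 *   USB4_VERSION=1.0
 *   USB4_TYPE=hub
 *
 * while the host router reports USB4_TYPE=host and a leaf device with no
 * enabled downstream null ports reports USB4_TYPE=device.
 */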
2060
2d8ff0b5
MW
2061/*
2062 * Currently we only need to provide the callbacks. Everything else is
2063 * handled in the connection manager.
2064 */
2065static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
2066{
4f7c2e0d
MW
2067 struct tb_switch *sw = tb_to_switch(dev);
2068 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2069
2070 if (cm_ops->runtime_suspend_switch)
2071 return cm_ops->runtime_suspend_switch(sw);
2072
2d8ff0b5
MW
2073 return 0;
2074}
2075
2076static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
2077{
4f7c2e0d
MW
2078 struct tb_switch *sw = tb_to_switch(dev);
2079 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2080
2081 if (cm_ops->runtime_resume_switch)
2082 return cm_ops->runtime_resume_switch(sw);
2d8ff0b5
MW
2083 return 0;
2084}
2085
2086static const struct dev_pm_ops tb_switch_pm_ops = {
2087 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
2088 NULL)
2089};
2090
bfe778ac
MW
2091struct device_type tb_switch_type = {
2092 .name = "thunderbolt_device",
2093 .release = tb_switch_release,
2f608ba1 2094 .uevent = tb_switch_uevent,
2d8ff0b5 2095 .pm = &tb_switch_pm_ops,
bfe778ac
MW
2096};
2097
2c3c4197
MW
2098static int tb_switch_get_generation(struct tb_switch *sw)
2099{
2100 switch (sw->config.device_id) {
2101 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2102 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2103 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2104 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2105 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2106 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2107 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2108 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2109 return 1;
2110
2111 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2112 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2113 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2114 return 2;
2115
2116 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2117 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2118 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2119 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2120 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
4bac471d
RM
2121 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2122 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2123 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
3cdb9446
MW
2124 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2125 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2c3c4197
MW
2126 return 3;
2127
2128 default:
b0407983
MW
2129 if (tb_switch_is_usb4(sw))
2130 return 4;
2131
2c3c4197
MW
2132 /*
2133 * For unknown switches assume generation to be 1 to be
2134 * on the safe side.
2135 */
2136 tb_sw_warn(sw, "unsupported switch device id %#x\n",
2137 sw->config.device_id);
2138 return 1;
2139 }
2140}
2141
b0407983
MW
2142static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2143{
2144 int max_depth;
2145
2146 if (tb_switch_is_usb4(sw) ||
2147 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2148 max_depth = USB4_SWITCH_MAX_DEPTH;
2149 else
2150 max_depth = TB_SWITCH_MAX_DEPTH;
2151
2152 return depth > max_depth;
2153}
2154
a25c8b2f 2155/**
bfe778ac
MW
2156 * tb_switch_alloc() - allocate a switch
2157 * @tb: Pointer to the owning domain
2158 * @parent: Parent device for this switch
2159 * @route: Route string for this switch
a25c8b2f 2160 *
bfe778ac
MW
2161 * Allocates and initializes a switch. Will not upload configuration to
2162 * the switch. For that you need to call tb_switch_configure()
2163 * separately. The returned switch should be released by calling
2164 * tb_switch_put().
2165 *
444ac384
MW
2166 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2167 * failure.
a25c8b2f 2168 */
bfe778ac
MW
2169struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2170 u64 route)
a25c8b2f 2171{
a25c8b2f 2172 struct tb_switch *sw;
f0342e75 2173 int upstream_port;
444ac384 2174 int i, ret, depth;
f0342e75 2175
b0407983
MW
2176 /* Unlock the downstream port so we can access the switch below */
2177 if (route) {
2178 struct tb_switch *parent_sw = tb_to_switch(parent);
2179 struct tb_port *down;
2180
2181 down = tb_port_at(route, parent_sw);
2182 tb_port_unlock(down);
2183 }
2184
f0342e75 2185 depth = tb_route_length(route);
f0342e75
MW
2186
2187 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
a25c8b2f 2188 if (upstream_port < 0)
444ac384 2189 return ERR_PTR(upstream_port);
a25c8b2f
AN
2190
2191 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2192 if (!sw)
444ac384 2193 return ERR_PTR(-ENOMEM);
a25c8b2f
AN
2194
2195 sw->tb = tb;
444ac384
MW
2196 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2197 if (ret)
bfe778ac
MW
2198 goto err_free_sw_ports;
2199
b0407983
MW
2200 sw->generation = tb_switch_get_generation(sw);
2201
daa5140f 2202 tb_dbg(tb, "current switch config:\n");
b0407983 2203 tb_dump_switch(tb, sw);
a25c8b2f
AN
2204
2205 /* configure switch */
2206 sw->config.upstream_port_number = upstream_port;
f0342e75
MW
2207 sw->config.depth = depth;
2208 sw->config.route_hi = upper_32_bits(route);
2209 sw->config.route_lo = lower_32_bits(route);
bfe778ac 2210 sw->config.enabled = 0;
a25c8b2f 2211
b0407983 2212 /* Make sure we do not exceed maximum topology limit */
704a940d
CIK
2213 if (tb_switch_exceeds_max_depth(sw, depth)) {
2214 ret = -EADDRNOTAVAIL;
2215 goto err_free_sw_ports;
2216 }
b0407983 2217
a25c8b2f
AN
2218 /* initialize ports */
2219 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
343fcb8c 2220 GFP_KERNEL);
444ac384
MW
2221 if (!sw->ports) {
2222 ret = -ENOMEM;
bfe778ac 2223 goto err_free_sw_ports;
444ac384 2224 }
a25c8b2f
AN
2225
2226 for (i = 0; i <= sw->config.max_port_number; i++) {
343fcb8c
AN
2227 /* minimum setup for tb_find_cap and tb_drom_read to work */
2228 sw->ports[i].sw = sw;
2229 sw->ports[i].port = i;
781e14ea
MW
2230
2231 /* Control port does not need HopID allocation */
2232 if (i) {
2233 ida_init(&sw->ports[i].in_hopids);
2234 ida_init(&sw->ports[i].out_hopids);
2235 }
a25c8b2f
AN
2236 }
2237
444ac384 2238 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
b0407983
MW
2239 if (ret > 0)
2240 sw->cap_plug_events = ret;
ca389f71 2241
23ccd21c
GF
2242 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2243 if (ret > 0)
2244 sw->cap_vsec_tmu = ret;
2245
444ac384
MW
2246 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2247 if (ret > 0)
2248 sw->cap_lc = ret;
a9be5582 2249
43f977bc
GF
2250 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
2251 if (ret > 0)
2252 sw->cap_lp = ret;
2253
f67cf491
MW
2254 /* Root switch is always authorized */
2255 if (!route)
2256 sw->authorized = true;
2257
bfe778ac
MW
2258 device_initialize(&sw->dev);
2259 sw->dev.parent = parent;
2260 sw->dev.bus = &tb_bus_type;
2261 sw->dev.type = &tb_switch_type;
2262 sw->dev.groups = switch_groups;
2263 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2264
2265 return sw;
2266
2267err_free_sw_ports:
2268 kfree(sw->ports);
2269 kfree(sw);
2270
444ac384 2271 return ERR_PTR(ret);
bfe778ac
MW
2272}
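
/*
 * Illustrative sketch only (the real callers live in the connection
 * managers): the usual lifecycle is tb_switch_alloc(), tb_switch_configure()
 * and tb_switch_add(), with tb_switch_put() releasing the switch if any of
 * the later steps fail. The helper name below is made up for the example.
 */
static int __maybe_unused example_add_router(struct tb *tb,
					     struct device *parent, u64 route)
{
	struct tb_switch *sw;
	int ret;

	sw = tb_switch_alloc(tb, parent, route);
	if (IS_ERR(sw))
		return PTR_ERR(sw);

	/* Upload the configuration before exposing the device */
	ret = tb_switch_configure(sw);
	if (ret)
		goto err_put;

	/* Reads DROM, initializes ports and registers the device */
	ret = tb_switch_add(sw);
	if (ret)
		goto err_put;

	return 0;

err_put:
	tb_switch_put(sw);
	return ret;
}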
2273
e6b245cc
MW
2274/**
2275 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2276 * @tb: Pointer to the owning domain
2277 * @parent: Parent device for this switch
2278 * @route: Route string for this switch
2279 *
2280 * This creates a switch in safe mode. This means the switch pretty much
2281 * lacks all capabilities except the DMA configuration port until it is
2282 * flashed with a valid NVM firmware.
2283 *
2284 * The returned switch must be released by calling tb_switch_put().
2285 *
444ac384 2286 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
e6b245cc
MW
2287 */
2288struct tb_switch *
2289tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2290{
2291 struct tb_switch *sw;
2292
2293 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2294 if (!sw)
444ac384 2295 return ERR_PTR(-ENOMEM);
e6b245cc
MW
2296
2297 sw->tb = tb;
2298 sw->config.depth = tb_route_length(route);
2299 sw->config.route_hi = upper_32_bits(route);
2300 sw->config.route_lo = lower_32_bits(route);
2301 sw->safe_mode = true;
2302
2303 device_initialize(&sw->dev);
2304 sw->dev.parent = parent;
2305 sw->dev.bus = &tb_bus_type;
2306 sw->dev.type = &tb_switch_type;
2307 sw->dev.groups = switch_groups;
2308 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2309
2310 return sw;
2311}
2312
bfe778ac
MW
2313/**
2314 * tb_switch_configure() - Uploads configuration to the switch
2315 * @sw: Switch to configure
2316 *
2317 * Call this function before the switch is added to the system. It will
2318 * upload configuration to the switch and make it available for the
b0407983
MW
2319 * connection manager to use. Can be called for the switch again after
2320 * resume from low power states to re-initialize it.
bfe778ac
MW
2321 *
2322 * Return: %0 in case of success and negative errno in case of failure
2323 */
2324int tb_switch_configure(struct tb_switch *sw)
2325{
2326 struct tb *tb = sw->tb;
2327 u64 route;
2328 int ret;
2329
2330 route = tb_route(sw);
bfe778ac 2331
b0407983 2332 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
b2911a59 2333 sw->config.enabled ? "restoring" : "initializing", route,
b0407983 2334 tb_route_length(route), sw->config.upstream_port_number);
bfe778ac 2335
bfe778ac
MW
2336 sw->config.enabled = 1;
2337
b0407983
MW
2338 if (tb_switch_is_usb4(sw)) {
2339 /*
2340 * For USB4 devices, we need to program the CM version
2341 * accordingly so that it knows to expose all the
2342 * additional capabilities.
2343 */
2344 sw->config.cmuv = USB4_VERSION_1_0;
2345
2346 /* Enumerate the switch */
2347 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2348 ROUTER_CS_1, 4);
2349 if (ret)
2350 return ret;
bfe778ac 2351
b0407983 2352 ret = usb4_switch_setup(sw);
b0407983
MW
2353 } else {
2354 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2355 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2356 sw->config.vendor_id);
2357
2358 if (!sw->cap_plug_events) {
2359 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2360 return -ENODEV;
2361 }
2362
2363 /* Enumerate the switch */
2364 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2365 ROUTER_CS_1, 3);
b0407983 2366 }
e879a709
MW
2367 if (ret)
2368 return ret;
2369
bfe778ac
MW
2370 return tb_plug_events_active(sw, true);
2371}
2372
2cc12751 2373static int tb_switch_set_uuid(struct tb_switch *sw)
bfe778ac 2374{
b0407983 2375 bool uid = false;
bfe778ac 2376 u32 uuid[4];
a9be5582 2377 int ret;
bfe778ac
MW
2378
2379 if (sw->uuid)
a9be5582 2380 return 0;
bfe778ac 2381
b0407983
MW
2382 if (tb_switch_is_usb4(sw)) {
2383 ret = usb4_switch_read_uid(sw, &sw->uid);
2384 if (ret)
2385 return ret;
2386 uid = true;
2387 } else {
2388 /*
2389 * The newer controllers include fused UUID as part of
2390 * link controller specific registers
2391 */
2392 ret = tb_lc_read_uuid(sw, uuid);
2393 if (ret) {
2394 if (ret != -EINVAL)
2395 return ret;
2396 uid = true;
2397 }
2398 }
2399
2400 if (uid) {
bfe778ac
MW
2401 /*
2402 * ICM generates UUID based on UID and fills the upper
2403 * two words with ones. This is not strictly following
2404 * UUID format but we want to be compatible with it so
2405 * we do the same here.
2406 */
2407 uuid[0] = sw->uid & 0xffffffff;
2408 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2409 uuid[2] = 0xffffffff;
2410 uuid[3] = 0xffffffff;
2411 }
2412
2413 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2cc12751 2414 if (!sw->uuid)
a9be5582
MW
2415 return -ENOMEM;
2416 return 0;
bfe778ac
MW
2417}
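
/*
 * Worked example of the ICM compatible layout built above (illustration
 * only): a router with UID 0x0123456789abcdef ends up with
 *
 *   uuid[0] = 0x89abcdef   (low 32 bits of the UID)
 *   uuid[1] = 0x01234567   (high 32 bits of the UID)
 *   uuid[2] = 0xffffffff
 *   uuid[3] = 0xffffffff
 *
 * which matches what the firmware connection manager would have generated
 * for the same router.
 */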
2418
e6b245cc 2419static int tb_switch_add_dma_port(struct tb_switch *sw)
3e136768 2420{
e6b245cc
MW
2421 u32 status;
2422 int ret;
2423
3e136768 2424 switch (sw->generation) {
3e136768
MW
2425 case 2:
2426 /* Only root switch can be upgraded */
2427 if (tb_route(sw))
e6b245cc 2428 return 0;
7a7ebfa8 2429
df561f66 2430 fallthrough;
7a7ebfa8 2431 case 3:
661b1947 2432 case 4:
7a7ebfa8
MW
2433 ret = tb_switch_set_uuid(sw);
2434 if (ret)
2435 return ret;
3e136768
MW
2436 break;
2437
2438 default:
e6b245cc
MW
2439 /*
2440 * DMA port is the only thing available when the switch
2441 * is in safe mode.
2442 */
2443 if (!sw->safe_mode)
2444 return 0;
2445 break;
3e136768
MW
2446 }
2447
661b1947
MW
2448 if (sw->no_nvm_upgrade)
2449 return 0;
2450
2451 if (tb_switch_is_usb4(sw)) {
2452 ret = usb4_switch_nvm_authenticate_status(sw, &status);
2453 if (ret)
2454 return ret;
2455
2456 if (status) {
2457 tb_sw_info(sw, "switch flash authentication failed\n");
2458 nvm_set_auth_status(sw, status);
2459 }
2460
2461 return 0;
2462 }
2463
3f415e5e 2464 /* Root switch DMA port requires running firmware */
f07a3608 2465 if (!tb_route(sw) && !tb_switch_is_icm(sw))
e6b245cc
MW
2466 return 0;
2467
3e136768 2468 sw->dma_port = dma_port_alloc(sw);
e6b245cc
MW
2469 if (!sw->dma_port)
2470 return 0;
2471
7a7ebfa8
MW
2472 /*
2473 * If there is status already set then authentication failed
2474 * when the dma_port_flash_update_auth() returned. Power cycling
2475 * is not needed (it was done already) so only thing we do here
2476 * is to unblock runtime PM of the root port.
2477 */
2478 nvm_get_auth_status(sw, &status);
2479 if (status) {
2480 if (!tb_route(sw))
b0407983 2481 nvm_authenticate_complete_dma_port(sw);
7a7ebfa8
MW
2482 return 0;
2483 }
2484
e6b245cc
MW
2485 /*
2486 * Check status of the previous flash authentication. If there
2487 * is one we need to power cycle the switch in any case to make
2488 * it functional again.
2489 */
2490 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2491 if (ret <= 0)
2492 return ret;
2493
1830b6ee
MW
2494 /* Now we can allow root port to suspend again */
2495 if (!tb_route(sw))
b0407983 2496 nvm_authenticate_complete_dma_port(sw);
1830b6ee 2497
e6b245cc
MW
2498 if (status) {
2499 tb_sw_info(sw, "switch flash authentication failed\n");
e6b245cc
MW
2500 nvm_set_auth_status(sw, status);
2501 }
2502
2503 tb_sw_info(sw, "power cycling the switch now\n");
2504 dma_port_power_cycle(sw->dma_port);
2505
2506 /*
2507 * We return error here which causes the switch adding failure.
2508 * It should appear back after power cycle is complete.
2509 */
2510 return -ESHUTDOWN;
3e136768
MW
2511}
2512
0d46c08d
MW
2513static void tb_switch_default_link_ports(struct tb_switch *sw)
2514{
2515 int i;
2516
42716425 2517 for (i = 1; i <= sw->config.max_port_number; i++) {
0d46c08d
MW
2518 struct tb_port *port = &sw->ports[i];
2519 struct tb_port *subordinate;
2520
2521 if (!tb_port_is_null(port))
2522 continue;
2523
2524 /* Check for the subordinate port */
2525 if (i == sw->config.max_port_number ||
2526 !tb_port_is_null(&sw->ports[i + 1]))
2527 continue;
2528
2529 /* Link them if not already done so (by DROM) */
2530 subordinate = &sw->ports[i + 1];
2531 if (!port->dual_link_port && !subordinate->dual_link_port) {
2532 port->link_nr = 0;
2533 port->dual_link_port = subordinate;
2534 subordinate->link_nr = 1;
2535 subordinate->dual_link_port = port;
2536
2537 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2538 port->port, subordinate->port);
2539 }
2540 }
2541}
2542
91c0c120
MW
2543static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2544{
2545 const struct tb_port *up = tb_upstream_port(sw);
2546
2547 if (!up->dual_link_port || !up->dual_link_port->remote)
2548 return false;
2549
b0407983
MW
2550 if (tb_switch_is_usb4(sw))
2551 return usb4_switch_lane_bonding_possible(sw);
91c0c120
MW
2552 return tb_lc_lane_bonding_possible(sw);
2553}
2554
2555static int tb_switch_update_link_attributes(struct tb_switch *sw)
2556{
2557 struct tb_port *up;
2558 bool change = false;
2559 int ret;
2560
2561 if (!tb_route(sw) || tb_switch_is_icm(sw))
2562 return 0;
2563
2564 up = tb_upstream_port(sw);
2565
2566 ret = tb_port_get_link_speed(up);
2567 if (ret < 0)
2568 return ret;
2569 if (sw->link_speed != ret)
2570 change = true;
2571 sw->link_speed = ret;
2572
2573 ret = tb_port_get_link_width(up);
2574 if (ret < 0)
2575 return ret;
2576 if (sw->link_width != ret)
2577 change = true;
2578 sw->link_width = ret;
2579
2580 /* Notify userspace that there is possible link attribute change */
2581 if (device_is_registered(&sw->dev) && change)
2582 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2583
2584 return 0;
2585}
2586
2587/**
2588 * tb_switch_lane_bonding_enable() - Enable lane bonding
2589 * @sw: Switch to enable lane bonding
2590 *
2591 * Connection manager can call this function to enable lane bonding of a
2592 * switch. If conditions are correct and both switches support the feature,
2593 * lanes are bonded. It is safe to call this for any switch.
2594 */
2595int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2596{
2597 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2598 struct tb_port *up, *down;
2599 u64 route = tb_route(sw);
2600 int ret;
2601
2602 if (!route)
2603 return 0;
2604
2605 if (!tb_switch_lane_bonding_possible(sw))
2606 return 0;
2607
2608 up = tb_upstream_port(sw);
2609 down = tb_port_at(route, parent);
2610
2611 if (!tb_port_is_width_supported(up, 2) ||
2612 !tb_port_is_width_supported(down, 2))
2613 return 0;
2614
2615 ret = tb_port_lane_bonding_enable(up);
2616 if (ret) {
2617 tb_port_warn(up, "failed to enable lane bonding\n");
2618 return ret;
2619 }
2620
2621 ret = tb_port_lane_bonding_enable(down);
2622 if (ret) {
2623 tb_port_warn(down, "failed to enable lane bonding\n");
2624 tb_port_lane_bonding_disable(up);
2625 return ret;
2626 }
2627
e7051bea
MW
2628 ret = tb_port_wait_for_link_width(down, 2, 100);
2629 if (ret) {
2630 tb_port_warn(down, "timeout enabling lane bonding\n");
2631 return ret;
2632 }
2633
69fea377
MW
2634 tb_port_update_credits(down);
2635 tb_port_update_credits(up);
91c0c120
MW
2636 tb_switch_update_link_attributes(sw);
2637
2638 tb_sw_dbg(sw, "lane bonding enabled\n");
2639 return ret;
2640}
2641
2642/**
2643 * tb_switch_lane_bonding_disable() - Disable lane bonding
2644 * @sw: Switch whose lane bonding to disable
2645 *
2646 * Disables lane bonding between @sw and parent. This can be called even
2647 * if lanes were not bonded originally.
2648 */
2649void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2650{
2651 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2652 struct tb_port *up, *down;
2653
2654 if (!tb_route(sw))
2655 return;
2656
2657 up = tb_upstream_port(sw);
2658 if (!up->bonded)
2659 return;
2660
2661 down = tb_port_at(tb_route(sw), parent);
2662
2663 tb_port_lane_bonding_disable(up);
2664 tb_port_lane_bonding_disable(down);
2665
e7051bea
MW
2666 /*
2667 * It is fine if we get other errors as the router might have
2668 * been unplugged.
2669 */
2670 if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2671 tb_sw_warn(sw, "timeout disabling lane bonding\n");
2672
69fea377
MW
2673 tb_port_update_credits(down);
2674 tb_port_update_credits(up);
91c0c120 2675 tb_switch_update_link_attributes(sw);
69fea377 2676
91c0c120
MW
2677 tb_sw_dbg(sw, "lane bonding disabled\n");
2678}
2679
de462039
MW
2680/**
2681 * tb_switch_configure_link() - Set link configured
2682 * @sw: Switch whose link is configured
2683 *
2684 * Sets the link upstream from @sw configured (from both ends) so that
2685 * it will not be disconnected when the domain exits sleep. Can be
2686 * called for any switch.
2687 *
2688 * It is recommended that this is called after lane bonding is enabled.
2689 *
2690 * Returns %0 on success and negative errno in case of error.
2691 */
2692int tb_switch_configure_link(struct tb_switch *sw)
2693{
e28178bf
MW
2694 struct tb_port *up, *down;
2695 int ret;
2696
de462039
MW
2697 if (!tb_route(sw) || tb_switch_is_icm(sw))
2698 return 0;
2699
e28178bf
MW
2700 up = tb_upstream_port(sw);
2701 if (tb_switch_is_usb4(up->sw))
2702 ret = usb4_port_configure(up);
2703 else
2704 ret = tb_lc_configure_port(up);
2705 if (ret)
2706 return ret;
2707
2708 down = up->remote;
2709 if (tb_switch_is_usb4(down->sw))
2710 return usb4_port_configure(down);
2711 return tb_lc_configure_port(down);
de462039
MW
2712}
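
/*
 * Usage sketch (assumed caller, mirroring what a software connection
 * manager typically does when a new router appears): bond the lanes first
 * and only then mark the link configured, as recommended above. The helper
 * name is hypothetical.
 */
static void __maybe_unused example_setup_link(struct tb_switch *sw)
{
	/* Not fatal if bonding is not possible; the link stays single lane */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	if (tb_switch_configure_link(sw))
		tb_sw_warn(sw, "failed to configure link\n");
}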
2713
2714/**
2715 * tb_switch_unconfigure_link() - Unconfigure link
2716 * @sw: Switch whose link is unconfigured
2717 *
2718 * Sets the link unconfigured so the @sw will be disconnected if the
2719 * domain exits sleep.
2720 */
2721void tb_switch_unconfigure_link(struct tb_switch *sw)
2722{
e28178bf
MW
2723 struct tb_port *up, *down;
2724
de462039
MW
2725 if (sw->is_unplugged)
2726 return;
2727 if (!tb_route(sw) || tb_switch_is_icm(sw))
2728 return;
2729
e28178bf
MW
2730 up = tb_upstream_port(sw);
2731 if (tb_switch_is_usb4(up->sw))
2732 usb4_port_unconfigure(up);
2733 else
2734 tb_lc_unconfigure_port(up);
2735
2736 down = up->remote;
2737 if (tb_switch_is_usb4(down->sw))
2738 usb4_port_unconfigure(down);
de462039 2739 else
e28178bf 2740 tb_lc_unconfigure_port(down);
de462039
MW
2741}
2742
56ad3aef
MW
2743static void tb_switch_credits_init(struct tb_switch *sw)
2744{
2745 if (tb_switch_is_icm(sw))
2746 return;
2747 if (!tb_switch_is_usb4(sw))
2748 return;
2749 if (usb4_switch_credits_init(sw))
2750 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2751}
2752
bfe778ac
MW
2753/**
2754 * tb_switch_add() - Add a switch to the domain
2755 * @sw: Switch to add
2756 *
2757 * This is the last step in adding a switch to the domain. It will read
2758 * identification information from DROM and initialize ports so that
2759 * they can be used to connect other switches. The switch will be
2760 * exposed to the userspace when this function successfully returns. To
2761 * remove and release the switch, call tb_switch_remove().
2762 *
2763 * Return: %0 in case of success and negative errno in case of failure
2764 */
2765int tb_switch_add(struct tb_switch *sw)
2766{
2767 int i, ret;
2768
3e136768
MW
2769 /*
2770 * Initialize DMA control port now before we read DROM. Recent
2771 * host controllers have more complete DROM on NVM that includes
2772 * vendor and model identification strings which we then expose
2773 * to the userspace. NVM can be accessed through DMA
2774 * configuration based mailbox.
2775 */
e6b245cc 2776 ret = tb_switch_add_dma_port(sw);
af99f696
MW
2777 if (ret) {
2778 dev_err(&sw->dev, "failed to add DMA port\n");
f53e7676 2779 return ret;
af99f696 2780 }
343fcb8c 2781
e6b245cc 2782 if (!sw->safe_mode) {
56ad3aef
MW
2783 tb_switch_credits_init(sw);
2784
e6b245cc
MW
2785 /* read drom */
2786 ret = tb_drom_read(sw);
2787 if (ret) {
af99f696 2788 dev_err(&sw->dev, "reading DROM failed\n");
e6b245cc
MW
2789 return ret;
2790 }
daa5140f 2791 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
bfe778ac 2792
e23a5afd
MW
2793 tb_check_quirks(sw);
2794
2cc12751 2795 ret = tb_switch_set_uuid(sw);
af99f696
MW
2796 if (ret) {
2797 dev_err(&sw->dev, "failed to set UUID\n");
2cc12751 2798 return ret;
af99f696 2799 }
e6b245cc
MW
2800
2801 for (i = 0; i <= sw->config.max_port_number; i++) {
2802 if (sw->ports[i].disabled) {
daa5140f 2803 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
e6b245cc
MW
2804 continue;
2805 }
2806 ret = tb_init_port(&sw->ports[i]);
af99f696
MW
2807 if (ret) {
2808 dev_err(&sw->dev, "failed to initialize port %d\n", i);
e6b245cc 2809 return ret;
af99f696 2810 }
343fcb8c 2811 }
91c0c120 2812
0d46c08d
MW
2813 tb_switch_default_link_ports(sw);
2814
91c0c120
MW
2815 ret = tb_switch_update_link_attributes(sw);
2816 if (ret)
2817 return ret;
cf29b9af
RM
2818
2819 ret = tb_switch_tmu_init(sw);
2820 if (ret)
2821 return ret;
343fcb8c
AN
2822 }
2823
e6b245cc 2824 ret = device_add(&sw->dev);
af99f696
MW
2825 if (ret) {
2826 dev_err(&sw->dev, "failed to add device: %d\n", ret);
e6b245cc 2827 return ret;
af99f696 2828 }
e6b245cc 2829
a83bc4a5
MW
2830 if (tb_route(sw)) {
2831 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2832 sw->vendor, sw->device);
2833 if (sw->vendor_name && sw->device_name)
2834 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2835 sw->device_name);
2836 }
2837
cae5f515
MW
2838 ret = usb4_switch_add_ports(sw);
2839 if (ret) {
2840 dev_err(&sw->dev, "failed to add USB4 ports\n");
2841 goto err_del;
2842 }
2843
e6b245cc 2844 ret = tb_switch_nvm_add(sw);
2d8ff0b5 2845 if (ret) {
af99f696 2846 dev_err(&sw->dev, "failed to add NVM devices\n");
cae5f515 2847 goto err_ports;
2d8ff0b5 2848 }
e6b245cc 2849
b2911a59
MW
2850 /*
2851 * Thunderbolt routers do not generate wakeups themselves but
2852 * they forward wakeups from tunneled protocols, so enable it
2853 * here.
2854 */
2855 device_init_wakeup(&sw->dev, true);
2856
2d8ff0b5
MW
2857 pm_runtime_set_active(&sw->dev);
2858 if (sw->rpm) {
2859 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2860 pm_runtime_use_autosuspend(&sw->dev);
2861 pm_runtime_mark_last_busy(&sw->dev);
2862 pm_runtime_enable(&sw->dev);
2863 pm_request_autosuspend(&sw->dev);
2864 }
2865
54e41810 2866 tb_switch_debugfs_init(sw);
2d8ff0b5 2867 return 0;
cae5f515
MW
2868
2869err_ports:
2870 usb4_switch_remove_ports(sw);
2871err_del:
2872 device_del(&sw->dev);
2873
2874 return ret;
bfe778ac 2875}
c90553b3 2876
bfe778ac
MW
2877/**
2878 * tb_switch_remove() - Remove and release a switch
2879 * @sw: Switch to remove
2880 *
2881 * This will remove the switch from the domain and release it after last
2882 * reference count drops to zero. If there are switches connected below
2883 * this switch, they will be removed as well.
2884 */
2885void tb_switch_remove(struct tb_switch *sw)
2886{
b433d010 2887 struct tb_port *port;
ca389f71 2888
54e41810
GF
2889 tb_switch_debugfs_remove(sw);
2890
2d8ff0b5
MW
2891 if (sw->rpm) {
2892 pm_runtime_get_sync(&sw->dev);
2893 pm_runtime_disable(&sw->dev);
2894 }
2895
bfe778ac 2896 /* port 0 is the switch itself and never has a remote */
b433d010
MW
2897 tb_switch_for_each_port(sw, port) {
2898 if (tb_port_has_remote(port)) {
2899 tb_switch_remove(port->remote->sw);
2900 port->remote = NULL;
2901 } else if (port->xdomain) {
2902 tb_xdomain_remove(port->xdomain);
2903 port->xdomain = NULL;
dfe40ca4 2904 }
dacb1287
KK
2905
2906 /* Remove any downstream retimers */
2907 tb_retimer_remove_all(port);
bfe778ac
MW
2908 }
2909
2910 if (!sw->is_unplugged)
2911 tb_plug_events_active(sw, false);
b0407983 2912
e6b245cc 2913 tb_switch_nvm_remove(sw);
cae5f515 2914 usb4_switch_remove_ports(sw);
a83bc4a5
MW
2915
2916 if (tb_route(sw))
2917 dev_info(&sw->dev, "device disconnected\n");
bfe778ac 2918 device_unregister(&sw->dev);
a25c8b2f
AN
2919}
2920
053596d9 2921/**
aae20bb6 2922 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
5c6b471b 2923 * @sw: Router to mark unplugged
053596d9 2924 */
aae20bb6 2925void tb_sw_set_unplugged(struct tb_switch *sw)
053596d9 2926{
b433d010
MW
2927 struct tb_port *port;
2928
053596d9
AN
2929 if (sw == sw->tb->root_switch) {
2930 tb_sw_WARN(sw, "cannot unplug root switch\n");
2931 return;
2932 }
2933 if (sw->is_unplugged) {
2934 tb_sw_WARN(sw, "is_unplugged already set\n");
2935 return;
2936 }
2937 sw->is_unplugged = true;
b433d010
MW
2938 tb_switch_for_each_port(sw, port) {
2939 if (tb_port_has_remote(port))
2940 tb_sw_set_unplugged(port->remote->sw);
2941 else if (port->xdomain)
2942 port->xdomain->is_unplugged = true;
053596d9
AN
2943 }
2944}
2945
b2911a59
MW
2946static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
2947{
2948 if (flags)
2949 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
2950 else
2951 tb_sw_dbg(sw, "disabling wakeup\n");
2952
2953 if (tb_switch_is_usb4(sw))
2954 return usb4_switch_set_wake(sw, flags);
2955 return tb_lc_set_wake(sw, flags);
2956}
2957
23dd5bb4
AN
2958int tb_switch_resume(struct tb_switch *sw)
2959{
b433d010
MW
2960 struct tb_port *port;
2961 int err;
2962
daa5140f 2963 tb_sw_dbg(sw, "resuming switch\n");
23dd5bb4 2964
08a5e4ce
MW
2965 /*
2966 * Check for UID of the connected switches except for root
2967 * switch which we assume cannot be removed.
2968 */
2969 if (tb_route(sw)) {
2970 u64 uid;
2971
7ea4cd6b
MW
2972 /*
2973 * Check first that we can still read the switch config
2974 * space. It may be that there is now another domain
2975 * connected.
2976 */
2977 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
2978 if (err < 0) {
2979 tb_sw_info(sw, "switch not present anymore\n");
2980 return err;
2981 }
2982
a283de3e
ML
2983 /* We don't have any way to confirm this was the same device */
2984 if (!sw->uid)
2985 return -ENODEV;
2986
b0407983
MW
2987 if (tb_switch_is_usb4(sw))
2988 err = usb4_switch_read_uid(sw, &uid);
2989 else
2990 err = tb_drom_read_uid_only(sw, &uid);
08a5e4ce
MW
2991 if (err) {
2992 tb_sw_warn(sw, "uid read failed\n");
2993 return err;
2994 }
2995 if (sw->uid != uid) {
2996 tb_sw_info(sw,
2997 "changed while suspended (uid %#llx -> %#llx)\n",
2998 sw->uid, uid);
2999 return -ENODEV;
3000 }
23dd5bb4
AN
3001 }
3002
b0407983 3003 err = tb_switch_configure(sw);
23dd5bb4
AN
3004 if (err)
3005 return err;
3006
b2911a59
MW
3007 /* Disable wakes */
3008 tb_switch_set_wake(sw, 0);
3009
8145c435
MW
3010 err = tb_switch_tmu_init(sw);
3011 if (err)
3012 return err;
3013
23dd5bb4 3014 /* check for surviving downstream switches */
b433d010 3015 tb_switch_for_each_port(sw, port) {
3fb10ea4
RM
3016 if (!tb_port_is_null(port))
3017 continue;
3018
3019 if (!tb_port_resume(port))
23dd5bb4 3020 continue;
dfe40ca4 3021
7ea4cd6b 3022 if (tb_wait_for_port(port, true) <= 0) {
23dd5bb4
AN
3023 tb_port_warn(port,
3024 "lost during suspend, disconnecting\n");
7ea4cd6b
MW
3025 if (tb_port_has_remote(port))
3026 tb_sw_set_unplugged(port->remote->sw);
3027 else if (port->xdomain)
3028 port->xdomain->is_unplugged = true;
3fb10ea4 3029 } else {
b0407983
MW
3030 /*
3031 * Always unlock the port so the downstream
3032 * switch/domain is accessible.
3033 */
3034 if (tb_port_unlock(port))
3035 tb_port_warn(port, "failed to unlock port\n");
3036 if (port->remote && tb_switch_resume(port->remote->sw)) {
7ea4cd6b
MW
3037 tb_port_warn(port,
3038 "lost during suspend, disconnecting\n");
3039 tb_sw_set_unplugged(port->remote->sw);
3040 }
23dd5bb4
AN
3041 }
3042 }
3043 return 0;
3044}
3045
6ac6faee
MW
3046/**
3047 * tb_switch_suspend() - Put a switch to sleep
3048 * @sw: Switch to suspend
3049 * @runtime: Is this runtime suspend or system sleep
3050 *
3051 * Suspends the router and all its children. Enables wakes according to
3052 * the value of @runtime and then sets the sleep bit for the router. If
3053 * @sw is the host router, the domain is ready to go to sleep once this
3054 * function returns.
3055 */
3056void tb_switch_suspend(struct tb_switch *sw, bool runtime)
23dd5bb4 3057{
b2911a59 3058 unsigned int flags = 0;
b433d010
MW
3059 struct tb_port *port;
3060 int err;
3061
6ac6faee
MW
3062 tb_sw_dbg(sw, "suspending switch\n");
3063
43f977bc
GF
3064 /*
3065 * Actually only needed for Titan Ridge but for simplicity can be
3066 * done for USB4 devices too as CLx is re-enabled at resume.
3067 */
3068 if (tb_switch_disable_clx(sw, TB_CL0S))
3069 tb_sw_warn(sw, "failed to disable CLx on upstream port\n");
3070
23dd5bb4
AN
3071 err = tb_plug_events_active(sw, false);
3072 if (err)
3073 return;
3074
b433d010
MW
3075 tb_switch_for_each_port(sw, port) {
3076 if (tb_port_has_remote(port))
6ac6faee 3077 tb_switch_suspend(port->remote->sw, runtime);
23dd5bb4 3078 }
5480dfc2 3079
6ac6faee
MW
3080 if (runtime) {
3081 /* Trigger wake when something is plugged in/out */
3082 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
6026b703
MW
3083 flags |= TB_WAKE_ON_USB4;
3084 flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
6ac6faee
MW
3085 } else if (device_may_wakeup(&sw->dev)) {
3086 flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3087 }
b2911a59
MW
3088
3089 tb_switch_set_wake(sw, flags);
3090
b0407983
MW
3091 if (tb_switch_is_usb4(sw))
3092 usb4_switch_set_sleep(sw);
3093 else
3094 tb_lc_set_sleep(sw);
23dd5bb4 3095}
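
/*
 * Sketch of a system sleep path (assumed, simplified): the connection
 * manager can put the whole tree to sleep by suspending the root switch,
 * which recurses into all children before the domain itself goes down.
 * The helper name is made up for the example.
 */
static void __maybe_unused example_suspend_domain(struct tb *tb)
{
	/* false == system sleep; wakes follow device_may_wakeup() */
	tb_switch_suspend(tb->root_switch, false);
}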
f67cf491 3096
8afe909b
MW
3097/**
3098 * tb_switch_query_dp_resource() - Query availability of DP resource
3099 * @sw: Switch whose DP resource is queried
3100 * @in: DP IN port
3101 *
3102 * Queries availability of DP resource for DP tunneling using switch
3103 * specific means. Returns %true if resource is available.
3104 */
3105bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3106{
b0407983
MW
3107 if (tb_switch_is_usb4(sw))
3108 return usb4_switch_query_dp_resource(sw, in);
8afe909b
MW
3109 return tb_lc_dp_sink_query(sw, in);
3110}
3111
3112/**
3113 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3114 * @sw: Switch whose DP resource is allocated
3115 * @in: DP IN port
3116 *
3117 * Allocates DP resource for DP tunneling. The resource must be
3118 * available for this to succeed (see tb_switch_query_dp_resource()).
3119 * Returns %0 on success and negative errno otherwise.
3120 */
3121int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3122{
ce05b997
MW
3123 int ret;
3124
b0407983 3125 if (tb_switch_is_usb4(sw))
ce05b997
MW
3126 ret = usb4_switch_alloc_dp_resource(sw, in);
3127 else
3128 ret = tb_lc_dp_sink_alloc(sw, in);
3129
3130 if (ret)
3131 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3132 in->port);
3133 else
3134 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3135
3136 return ret;
8afe909b
MW
3137}
3138
3139/**
3140 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3141 * @sw: Switch whose DP resource is de-allocated
3142 * @in: DP IN port
3143 *
3144 * De-allocates DP resource that was previously allocated for DP
3145 * tunneling.
3146 */
3147void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3148{
b0407983
MW
3149 int ret;
3150
3151 if (tb_switch_is_usb4(sw))
3152 ret = usb4_switch_dealloc_dp_resource(sw, in);
3153 else
3154 ret = tb_lc_dp_sink_dealloc(sw, in);
3155
3156 if (ret)
8afe909b
MW
3157 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3158 in->port);
ce05b997
MW
3159 else
3160 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
8afe909b
MW
3161}
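
/*
 * Illustrative DP resource handling (a sketch; the real logic lives in the
 * connection manager around DP tunnel setup): query first, allocate the
 * resource before creating the tunnel and de-allocate it once the tunnel
 * is torn down. The helper name is hypothetical.
 */
static int __maybe_unused example_reserve_dp(struct tb_switch *sw,
					     struct tb_port *in)
{
	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;

	/* Pair with tb_switch_dealloc_dp_resource() on tunnel teardown */
	return tb_switch_alloc_dp_resource(sw, in);
}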
3162
f67cf491
MW
3163struct tb_sw_lookup {
3164 struct tb *tb;
3165 u8 link;
3166 u8 depth;
7c39ffe7 3167 const uuid_t *uuid;
8e9267bb 3168 u64 route;
f67cf491
MW
3169};
3170
418e3ea1 3171static int tb_switch_match(struct device *dev, const void *data)
f67cf491
MW
3172{
3173 struct tb_switch *sw = tb_to_switch(dev);
418e3ea1 3174 const struct tb_sw_lookup *lookup = data;
f67cf491
MW
3175
3176 if (!sw)
3177 return 0;
3178 if (sw->tb != lookup->tb)
3179 return 0;
3180
3181 if (lookup->uuid)
3182 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3183
8e9267bb
RM
3184 if (lookup->route) {
3185 return sw->config.route_lo == lower_32_bits(lookup->route) &&
3186 sw->config.route_hi == upper_32_bits(lookup->route);
3187 }
3188
f67cf491
MW
3189 /* Root switch is matched only by depth */
3190 if (!lookup->depth)
3191 return !sw->depth;
3192
3193 return sw->link == lookup->link && sw->depth == lookup->depth;
3194}
3195
3196/**
3197 * tb_switch_find_by_link_depth() - Find switch by link and depth
3198 * @tb: Domain the switch belongs
3199 * @link: Link number the switch is connected
3200 * @depth: Depth of the switch in link
3201 *
3202 * Returned switch has reference count increased so the caller needs to
3203 * call tb_switch_put() when done with the switch.
3204 */
3205struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3206{
3207 struct tb_sw_lookup lookup;
3208 struct device *dev;
3209
3210 memset(&lookup, 0, sizeof(lookup));
3211 lookup.tb = tb;
3212 lookup.link = link;
3213 lookup.depth = depth;
3214
3215 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3216 if (dev)
3217 return tb_to_switch(dev);
3218
3219 return NULL;
3220}
3221
3222/**
432019d6 3223 * tb_switch_find_by_uuid() - Find switch by UUID
f67cf491
MW
3224 * @tb: Domain the switch belongs
3225 * @uuid: UUID to look for
3226 *
3227 * Returned switch has reference count increased so the caller needs to
3228 * call tb_switch_put() when done with the switch.
3229 */
7c39ffe7 3230struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
f67cf491
MW
3231{
3232 struct tb_sw_lookup lookup;
3233 struct device *dev;
3234
3235 memset(&lookup, 0, sizeof(lookup));
3236 lookup.tb = tb;
3237 lookup.uuid = uuid;
3238
3239 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3240 if (dev)
3241 return tb_to_switch(dev);
3242
3243 return NULL;
3244}
e6b245cc 3245
8e9267bb
RM
3246/**
3247 * tb_switch_find_by_route() - Find switch by route string
3248 * @tb: Domain the switch belongs
3249 * @route: Route string to look for
3250 *
3251 * Returned switch has reference count increased so the caller needs to
3252 * call tb_switch_put() when done with the switch.
3253 */
3254struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
3255{
3256 struct tb_sw_lookup lookup;
3257 struct device *dev;
3258
3259 if (!route)
3260 return tb_switch_get(tb->root_switch);
3261
3262 memset(&lookup, 0, sizeof(lookup));
3263 lookup.tb = tb;
3264 lookup.route = route;
3265
3266 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3267 if (dev)
3268 return tb_to_switch(dev);
3269
3270 return NULL;
3271}
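
/*
 * Usage sketch (assumed caller): all the lookup helpers above return the
 * switch with its reference count increased, so the caller must balance
 * it with tb_switch_put(). The helper name is made up for the example.
 */
static bool __maybe_unused example_router_present(struct tb *tb, u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_route(tb, route);
	if (!sw)
		return false;

	tb_switch_put(sw);
	return true;
}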
3272
386e5e29
MW
3273/**
3274 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3275 * @sw: Switch to find the port from
3276 * @type: Port type to look for
3277 */
3278struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3279 enum tb_port_type type)
3280{
3281 struct tb_port *port;
3282
3283 tb_switch_for_each_port(sw, port) {
3284 if (port->config.type == type)
3285 return port;
3286 }
3287
3288 return NULL;
3289}
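
/*
 * Example use (a sketch, assuming the TB_TYPE_PCIE_DOWN adapter type from
 * tb.h): pick the first downstream PCIe adapter of a router.
 */
static struct tb_port *__maybe_unused example_first_pcie_down(struct tb_switch *sw)
{
	return tb_switch_find_port(sw, TB_TYPE_PCIE_DOWN);
}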
8a90e4fa
GF
3290
3291static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
3292{
3293 u32 phy;
3294 int ret;
3295
3296 ret = tb_port_read(port, &phy, TB_CFG_PORT,
3297 port->cap_phy + LANE_ADP_CS_1, 1);
3298 if (ret)
3299 return ret;
3300
3301 if (secondary)
3302 phy |= LANE_ADP_CS_1_PMS;
3303 else
3304 phy &= ~LANE_ADP_CS_1_PMS;
3305
3306 return tb_port_write(port, &phy, TB_CFG_PORT,
3307 port->cap_phy + LANE_ADP_CS_1, 1);
3308}
3309
3310static int tb_port_pm_secondary_enable(struct tb_port *port)
3311{
3312 return __tb_port_pm_secondary_set(port, true);
3313}
3314
3315static int tb_port_pm_secondary_disable(struct tb_port *port)
3316{
3317 return __tb_port_pm_secondary_set(port, false);
3318}
3319
3320static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
3321{
3322 struct tb_switch *parent = tb_switch_parent(sw);
3323 struct tb_port *up, *down;
3324 int ret;
3325
3326 if (!tb_route(sw))
3327 return 0;
3328
3329 up = tb_upstream_port(sw);
3330 down = tb_port_at(tb_route(sw), parent);
3331 ret = tb_port_pm_secondary_enable(up);
3332 if (ret)
3333 return ret;
3334
3335 return tb_port_pm_secondary_disable(down);
3336}
3337
43f977bc 3338/* Called for USB4 or Titan Ridge routers only */
8a90e4fa
GF
3339static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
3340{
3341 u32 mask, val;
3342 bool ret;
3343
3344 /* Don't enable CLx in case of two single-lane links */
3345 if (!port->bonded && port->dual_link_port)
3346 return false;
3347
3348 /* Don't enable CLx in case of inter-domain link */
3349 if (port->xdomain)
3350 return false;
3351
43f977bc
GF
3352 if (tb_switch_is_usb4(port->sw)) {
3353 if (!usb4_port_clx_supported(port))
3354 return false;
3355 } else if (!tb_lc_is_clx_supported(port)) {
8a90e4fa 3356 return false;
43f977bc 3357 }
8a90e4fa
GF
3358
3359 switch (clx) {
3360 case TB_CL0S:
3361 /* CL0s support requires also CL1 support */
3362 mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
3363 break;
3364
3365 /* For now we support only CL0s. Not CL1, CL2 */
3366 case TB_CL1:
3367 case TB_CL2:
3368 default:
3369 return false;
3370 }
3371
3372 ret = tb_port_read(port, &val, TB_CFG_PORT,
3373 port->cap_phy + LANE_ADP_CS_0, 1);
3374 if (ret)
3375 return false;
3376
3377 return !!(val & mask);
3378}
3379
3380static inline bool tb_port_cl0s_supported(struct tb_port *port)
3381{
3382 return tb_port_clx_supported(port, TB_CL0S);
3383}
3384
3385static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
3386{
3387 u32 phy, mask;
3388 int ret;
3389
3390 /* Enabling CL0s also requires enabling CL1 */
3391 mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
3392 ret = tb_port_read(port, &phy, TB_CFG_PORT,
3393 port->cap_phy + LANE_ADP_CS_1, 1);
3394 if (ret)
3395 return ret;
3396
3397 if (enable)
3398 phy |= mask;
3399 else
3400 phy &= ~mask;
3401
3402 return tb_port_write(port, &phy, TB_CFG_PORT,
3403 port->cap_phy + LANE_ADP_CS_1, 1);
3404}
3405
3406static int tb_port_cl0s_disable(struct tb_port *port)
3407{
3408 return __tb_port_cl0s_set(port, false);
3409}
3410
3411static int tb_port_cl0s_enable(struct tb_port *port)
3412{
3413 return __tb_port_cl0s_set(port, true);
3414}
3415
3416static int tb_switch_enable_cl0s(struct tb_switch *sw)
3417{
3418 struct tb_switch *parent = tb_switch_parent(sw);
3419 bool up_cl0s_support, down_cl0s_support;
3420 struct tb_port *up, *down;
3421 int ret;
3422
43f977bc 3423 if (!tb_switch_is_clx_supported(sw))
8a90e4fa
GF
3424 return 0;
3425
3426 /*
3427 * Enable CLx for host router's downstream port as part of the
3428 * downstream router enabling procedure.
3429 */
3430 if (!tb_route(sw))
3431 return 0;
3432
3433 /* Enable CLx only for first hop router (depth = 1) */
3434 if (tb_route(parent))
3435 return 0;
3436
3437 ret = tb_switch_pm_secondary_resolve(sw);
3438 if (ret)
3439 return ret;
3440
3441 up = tb_upstream_port(sw);
3442 down = tb_port_at(tb_route(sw), parent);
3443
3444 up_cl0s_support = tb_port_cl0s_supported(up);
3445 down_cl0s_support = tb_port_cl0s_supported(down);
3446
3447 tb_port_dbg(up, "CL0s %ssupported\n",
3448 up_cl0s_support ? "" : "not ");
3449 tb_port_dbg(down, "CL0s %ssupported\n",
3450 down_cl0s_support ? "" : "not ");
3451
3452 if (!up_cl0s_support || !down_cl0s_support)
3453 return -EOPNOTSUPP;
3454
3455 ret = tb_port_cl0s_enable(up);
3456 if (ret)
3457 return ret;
3458
3459 ret = tb_port_cl0s_enable(down);
3460 if (ret) {
3461 tb_port_cl0s_disable(up);
3462 return ret;
3463 }
3464
43f977bc
GF
3465 ret = tb_switch_mask_clx_objections(sw);
3466 if (ret) {
3467 tb_port_cl0s_disable(up);
3468 tb_port_cl0s_disable(down);
3469 return ret;
3470 }
3471
8a90e4fa
GF
3472 sw->clx = TB_CL0S;
3473
3474 tb_port_dbg(up, "CL0s enabled\n");
3475 return 0;
3476}
3477
3478/**
3479 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
3480 * @sw: Router to enable CLx for
3481 * @clx: The CLx state to enable
3482 *
3483 * Enable CLx state only for the first hop router. That is the most
3484 * common use-case; it is intended for better thermal management and so
3485 * helps to improve performance. CLx is enabled only if both sides of the
3486 * link support CLx, if the two sides are not configured as two single
3487 * lane links and only if the link is not an inter-domain link. The
3488 * complete set of conditions is described in CM Guide 1.0 section 8.1.
3489 *
3490 * Return: Returns 0 on success or an error code on failure.
3491 */
3492int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
3493{
3494 struct tb_switch *root_sw = sw->tb->root_switch;
3495
fa487b2a
GF
3496 if (!clx_enabled)
3497 return 0;
3498
8a90e4fa
GF
3499 /*
3500 * CLx is not enabled and validated on Intel USB4 platforms before
3501 * Alder Lake.
3502 */
3503 if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
3504 return 0;
3505
3506 switch (clx) {
3507 case TB_CL0S:
3508 return tb_switch_enable_cl0s(sw);
3509
3510 default:
3511 return -EOPNOTSUPP;
3512 }
3513}
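
/*
 * Usage sketch (assumed, roughly what the software connection manager does
 * for first depth routers): failing to enter CL0s is not fatal, it only
 * costs some power and thermal headroom. The helper name is hypothetical.
 */
static void __maybe_unused example_enable_low_power(struct tb_switch *sw)
{
	if (tb_switch_enable_clx(sw, TB_CL0S))
		tb_sw_dbg(sw, "CL0s not enabled for this link\n");
}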
3514
3515static int tb_switch_disable_cl0s(struct tb_switch *sw)
3516{
3517 struct tb_switch *parent = tb_switch_parent(sw);
3518 struct tb_port *up, *down;
3519 int ret;
3520
43f977bc 3521 if (!tb_switch_is_clx_supported(sw))
8a90e4fa
GF
3522 return 0;
3523
3524 /*
3525 * Disable CLx for host router's downstream port as part of the
3526 * downstream router disabling procedure.
3527 */
3528 if (!tb_route(sw))
3529 return 0;
3530
3531 /* Disable CLx only for first hop router (depth = 1) */
3532 if (tb_route(parent))
3533 return 0;
3534
3535 up = tb_upstream_port(sw);
3536 down = tb_port_at(tb_route(sw), parent);
3537 ret = tb_port_cl0s_disable(up);
3538 if (ret)
3539 return ret;
3540
3541 ret = tb_port_cl0s_disable(down);
3542 if (ret)
3543 return ret;
3544
3545 sw->clx = TB_CLX_DISABLE;
3546
3547 tb_port_dbg(up, "CL0s disabled\n");
3548 return 0;
3549}
3550
3551/**
3552 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
3553 * @sw: Router to disable CLx for
3554 * @clx: The CLx state to disable
3555 *
3556 * Return: Returns 0 on success or an error code on failure.
3557 */
3558int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
3559{
fa487b2a
GF
3560 if (!clx_enabled)
3561 return 0;
3562
8a90e4fa
GF
3563 switch (clx) {
3564 case TB_CL0S:
3565 return tb_switch_disable_cl0s(sw);
3566
3567 default:
3568 return -EOPNOTSUPP;
3569 }
3570}
43f977bc
GF
3571
3572/**
3573 * tb_switch_mask_clx_objections() - Mask CLx objections for a router
3574 * @sw: Router to mask objections for
3575 *
3576 * Mask the objections coming from the second depth routers in order to
3577 * stop these objections from interfering with the CLx states of the first
3578 * depth link.
3579 */
3580int tb_switch_mask_clx_objections(struct tb_switch *sw)
3581{
3582 int up_port = sw->config.upstream_port_number;
3583 u32 offset, val[2], mask_obj, unmask_obj;
3584 int ret, i;
3585
3586 /* Only Titan Ridge of pre-USB4 devices support CLx states */
3587 if (!tb_switch_is_titan_ridge(sw))
3588 return 0;
3589
3590 if (!tb_route(sw))
3591 return 0;
3592
3593 /*
3594 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
3595 * Port A consists of lane adapters 1,2 and
3596 * Port B consists of lane adapters 3,4
3597 * If the upstream port is A (lanes 1,2), we mask objections from
3598 * port B (lanes 3,4) and unmask objections from port A, and vice versa.
3599 */
3600 if (up_port == 1) {
3601 mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
3602 unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
3603 offset = TB_LOW_PWR_C1_CL1;
3604 } else {
3605 mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
3606 unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
3607 offset = TB_LOW_PWR_C3_CL1;
3608 }
3609
3610 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
3611 sw->cap_lp + offset, ARRAY_SIZE(val));
3612 if (ret)
3613 return ret;
3614
3615 for (i = 0; i < ARRAY_SIZE(val); i++) {
3616 val[i] |= mask_obj;
3617 val[i] &= ~unmask_obj;
3618 }
3619
3620 return tb_sw_write(sw, &val, TB_CFG_SWITCH,
3621 sw->cap_lp + offset, ARRAY_SIZE(val));
3622}
3623
3624/*
3625 * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3
3626 * device. For now used only for Titan Ridge.
3627 */
3628static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
3629 unsigned int pcie_offset, u32 value)
3630{
3631 u32 offset, command, val;
3632 int ret;
3633
3634 if (sw->generation != 3)
3635 return -EOPNOTSUPP;
3636
3637 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
3638 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
3639 if (ret)
3640 return ret;
3641
3642 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
3643 command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
3644 command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
3645 command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
3646 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
3647 command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
3648
3649 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
3650
3651 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
3652 if (ret)
3653 return ret;
3654
3655 ret = tb_switch_wait_for_bit(sw, offset,
3656 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
3657 if (ret)
3658 return ret;
3659
3660 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
3661 if (ret)
3662 return ret;
3663
3664 if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
3665 return -ETIMEDOUT;
3666
3667 return 0;
3668}
3669
3670/**
3671 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
3672 * @sw: Router to enable PCIe L1
3673 *
3674 * For a Titan Ridge switch to enter a CLx state, its PCIe bridges must
3675 * enable entry to the PCIe L1 state. Call this after the upstream PCIe
3676 * tunnel has been configured. Due to an Intel platform limitation, this
3677 * must be called only for the first hop switch.
3678 */
3679int tb_switch_pcie_l1_enable(struct tb_switch *sw)
3680{
3681 struct tb_switch *parent = tb_switch_parent(sw);
3682 int ret;
3683
3684 if (!tb_route(sw))
3685 return 0;
3686
3687 if (!tb_switch_is_titan_ridge(sw))
3688 return 0;
3689
3690 /* Enable PCIe L1 enable only for first hop router (depth = 1) */
3691 if (tb_route(parent))
3692 return 0;
3693
3694 /* Write to downstream PCIe bridge #5 aka Dn4 */
3695 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
3696 if (ret)
3697 return ret;
3698
3699 /* Write to Upstream PCIe bridge #0 aka Up0 */
3700 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
3701}
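
/*
 * Sketch of the expected calling order (assumed from the description
 * above): only after the PCIe tunnel to a first depth Titan Ridge router
 * has been set up should L1 entry be enabled for its PCIe bridges. The
 * helper name is made up for the example.
 */
static void __maybe_unused example_after_pcie_tunnel_up(struct tb_switch *sw)
{
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 entry\n");
}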
30a4eca6
MW
3702
3703/**
3704 * tb_switch_xhci_connect() - Connect internal xHCI
3705 * @sw: Router whose xHCI to connect
3706 *
3707 * Can be called to any router. For Alpine Ridge and Titan Ridge
3708 * performs special flows that bring the xHCI functional for any device
3709 * connected to the type-C port. Call only after PCIe tunnel has been
3710 * established. The function only does the connect if not done already
3711 * so can be called several times for the same router.
3712 */
3713int tb_switch_xhci_connect(struct tb_switch *sw)
3714{
3715 bool usb_port1, usb_port3, xhci_port1, xhci_port3;
3716 struct tb_port *port1, *port3;
3717 int ret;
3718
3719 port1 = &sw->ports[1];
3720 port3 = &sw->ports[3];
3721
3722 if (tb_switch_is_alpine_ridge(sw)) {
3723 usb_port1 = tb_lc_is_usb_plugged(port1);
3724 usb_port3 = tb_lc_is_usb_plugged(port3);
3725 xhci_port1 = tb_lc_is_xhci_connected(port1);
3726 xhci_port3 = tb_lc_is_xhci_connected(port3);
3727
3728 /* Figure out correct USB port to connect */
3729 if (usb_port1 && !xhci_port1) {
3730 ret = tb_lc_xhci_connect(port1);
3731 if (ret)
3732 return ret;
3733 }
3734 if (usb_port3 && !xhci_port3)
3735 return tb_lc_xhci_connect(port3);
3736 } else if (tb_switch_is_titan_ridge(sw)) {
3737 ret = tb_lc_xhci_connect(port1);
3738 if (ret)
3739 return ret;
3740 return tb_lc_xhci_connect(port3);
3741 }
3742
3743 return 0;
3744}
3745
3746/**
3747 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
3748 * @sw: Router whose xHCI to disconnect
3749 *
3750 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
3751 * ports.
3752 */
3753void tb_switch_xhci_disconnect(struct tb_switch *sw)
3754{
3755 if (sw->generation == 3) {
3756 struct tb_port *port1 = &sw->ports[1];
3757 struct tb_port *port3 = &sw->ports[3];
3758
3759 tb_lc_xhci_disconnect(port1);
3760 tb_port_dbg(port1, "disconnected xHCI\n");
3761 tb_lc_xhci_disconnect(port3);
3762 tb_port_dbg(port3, "disconnected xHCI\n");
3763 }
3764}