thunderbolt: Rename Intel TB_VSE_CAP_IECS capability
[linux-block.git] / drivers / thunderbolt / switch.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
a25c8b2f 2/*
15c6784c 3 * Thunderbolt driver - switch/port utility functions
a25c8b2f
AN
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
15c6784c 6 * Copyright (C) 2018, Intel Corporation
a25c8b2f
AN
7 */
8
9#include <linux/delay.h>
e6b245cc
MW
10#include <linux/idr.h>
11#include <linux/nvmem-provider.h>
2d8ff0b5 12#include <linux/pm_runtime.h>
09f11b6c 13#include <linux/sched/signal.h>
e6b245cc 14#include <linux/sizes.h>
10fefe56 15#include <linux/slab.h>
a25c8b2f
AN
16
17#include "tb.h"
18
e6b245cc
MW
19/* Switch NVM support */
20
e6b245cc 21#define NVM_CSS 0x10
e6b245cc
MW
22
23struct nvm_auth_status {
24 struct list_head list;
7c39ffe7 25 uuid_t uuid;
e6b245cc
MW
26 u32 status;
27};
28
29/*
 30 * Hold NVM authentication failure status per switch. This information
 31 * needs to stay around even when the switch gets power cycled, so we
32 * keep it separately.
33 */
34static LIST_HEAD(nvm_auth_status_cache);
35static DEFINE_MUTEX(nvm_auth_status_lock);
36
37static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
38{
39 struct nvm_auth_status *st;
40
41 list_for_each_entry(st, &nvm_auth_status_cache, list) {
7c39ffe7 42 if (uuid_equal(&st->uuid, sw->uuid))
e6b245cc
MW
43 return st;
44 }
45
46 return NULL;
47}
48
49static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
50{
51 struct nvm_auth_status *st;
52
53 mutex_lock(&nvm_auth_status_lock);
54 st = __nvm_get_auth_status(sw);
55 mutex_unlock(&nvm_auth_status_lock);
56
57 *status = st ? st->status : 0;
58}
59
60static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
61{
62 struct nvm_auth_status *st;
63
64 if (WARN_ON(!sw->uuid))
65 return;
66
67 mutex_lock(&nvm_auth_status_lock);
68 st = __nvm_get_auth_status(sw);
69
70 if (!st) {
71 st = kzalloc(sizeof(*st), GFP_KERNEL);
72 if (!st)
73 goto unlock;
74
75 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
76 INIT_LIST_HEAD(&st->list);
77 list_add_tail(&st->list, &nvm_auth_status_cache);
78 }
79
80 st->status = status;
81unlock:
82 mutex_unlock(&nvm_auth_status_lock);
83}
84
85static void nvm_clear_auth_status(const struct tb_switch *sw)
86{
87 struct nvm_auth_status *st;
88
89 mutex_lock(&nvm_auth_status_lock);
90 st = __nvm_get_auth_status(sw);
91 if (st) {
92 list_del(&st->list);
93 kfree(st);
94 }
95 mutex_unlock(&nvm_auth_status_lock);
96}
97
98static int nvm_validate_and_write(struct tb_switch *sw)
99{
100 unsigned int image_size, hdr_size;
101 const u8 *buf = sw->nvm->buf;
102 u16 ds_size;
103 int ret;
104
105 if (!buf)
106 return -EINVAL;
107
108 image_size = sw->nvm->buf_data_size;
109 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
110 return -EINVAL;
111
112 /*
113 * FARB pointer must point inside the image and must at least
114 * contain parts of the digital section we will be reading here.
115 */
116 hdr_size = (*(u32 *)buf) & 0xffffff;
117 if (hdr_size + NVM_DEVID + 2 >= image_size)
118 return -EINVAL;
119
120 /* Digital section start should be aligned to 4k page */
121 if (!IS_ALIGNED(hdr_size, SZ_4K))
122 return -EINVAL;
123
124 /*
125 * Read digital section size and check that it also fits inside
126 * the image.
127 */
128 ds_size = *(u16 *)(buf + hdr_size);
129 if (ds_size >= image_size)
130 return -EINVAL;
131
132 if (!sw->safe_mode) {
133 u16 device_id;
134
135 /*
136 * Make sure the device ID in the image matches the one
137 * we read from the switch config space.
138 */
139 device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
140 if (device_id != sw->config.device_id)
141 return -EINVAL;
142
143 if (sw->generation < 3) {
144 /* Write CSS headers first */
145 ret = dma_port_flash_write(sw->dma_port,
146 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
147 DMA_PORT_CSS_MAX_SIZE);
148 if (ret)
149 return ret;
150 }
151
152 /* Skip headers in the image */
153 buf += hdr_size;
154 image_size -= hdr_size;
155 }
156
b0407983 157 if (tb_switch_is_usb4(sw))
4b794f80
ML
158 ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
159 else
160 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
161 if (!ret)
162 sw->nvm->flushed = true;
163 return ret;
e6b245cc
MW
164}
165
b0407983 166static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
e6b245cc 167{
7a7ebfa8 168 int ret = 0;
e6b245cc
MW
169
170 /*
171 * Root switch NVM upgrade requires that we disconnect the
d1ff7024 172 * existing paths first (in case it is not in safe mode
e6b245cc
MW
173 * already).
174 */
175 if (!sw->safe_mode) {
7a7ebfa8
MW
176 u32 status;
177
d1ff7024 178 ret = tb_domain_disconnect_all_paths(sw->tb);
e6b245cc
MW
179 if (ret)
180 return ret;
181 /*
182 * The host controller goes away pretty soon after this if
 183 * everything goes well, so getting a timeout is expected.
184 */
185 ret = dma_port_flash_update_auth(sw->dma_port);
7a7ebfa8
MW
186 if (!ret || ret == -ETIMEDOUT)
187 return 0;
188
189 /*
190 * Any error from update auth operation requires power
191 * cycling of the host router.
192 */
193 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
194 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
195 nvm_set_auth_status(sw, status);
e6b245cc
MW
196 }
197
198 /*
199 * From safe mode we can get out by just power cycling the
200 * switch.
201 */
202 dma_port_power_cycle(sw->dma_port);
7a7ebfa8 203 return ret;
e6b245cc
MW
204}
205
b0407983 206static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
e6b245cc
MW
207{
208 int ret, retries = 10;
209
210 ret = dma_port_flash_update_auth(sw->dma_port);
7a7ebfa8
MW
211 switch (ret) {
212 case 0:
213 case -ETIMEDOUT:
214 case -EACCES:
215 case -EINVAL:
216 /* Power cycle is required */
217 break;
218 default:
e6b245cc 219 return ret;
7a7ebfa8 220 }
e6b245cc
MW
221
222 /*
223 * Poll here for the authentication status. It takes some time
 224 * for the device to respond (we get a timeout for a while). Once
 225 * we get a response the device needs to be power cycled in order
 226 * for the new NVM to be taken into use.
227 */
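	/* With retries = 10 and a 500 ms sleep this polls for about 5 seconds */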
228 do {
229 u32 status;
230
231 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
232 if (ret < 0 && ret != -ETIMEDOUT)
233 return ret;
234 if (ret > 0) {
235 if (status) {
236 tb_sw_warn(sw, "failed to authenticate NVM\n");
237 nvm_set_auth_status(sw, status);
238 }
239
240 tb_sw_info(sw, "power cycling the switch now\n");
241 dma_port_power_cycle(sw->dma_port);
242 return 0;
243 }
244
245 msleep(500);
246 } while (--retries);
247
248 return -ETIMEDOUT;
249}
250
b0407983
MW
251static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
252{
253 struct pci_dev *root_port;
254
255 /*
 256 * During host router NVM upgrade we should not allow the root port
 257 * to go into D3cold because some root ports cannot trigger PME
 258 * themselves. To be on the safe side keep the root port in D0 during
259 * the whole upgrade process.
260 */
6ae72bfa 261 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
b0407983
MW
262 if (root_port)
263 pm_runtime_get_noresume(&root_port->dev);
264}
265
266static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
267{
268 struct pci_dev *root_port;
269
6ae72bfa 270 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
b0407983
MW
271 if (root_port)
272 pm_runtime_put(&root_port->dev);
273}
274
275static inline bool nvm_readable(struct tb_switch *sw)
276{
277 if (tb_switch_is_usb4(sw)) {
278 /*
279 * USB4 devices must support NVM operations but it is
280 * optional for hosts. Therefore we query the NVM sector
281 * size here and if it is supported assume NVM
282 * operations are implemented.
283 */
284 return usb4_switch_nvm_sector_size(sw) > 0;
285 }
286
287 /* Thunderbolt 2 and 3 devices support NVM through DMA port */
288 return !!sw->dma_port;
289}
290
291static inline bool nvm_upgradeable(struct tb_switch *sw)
292{
293 if (sw->no_nvm_upgrade)
294 return false;
295 return nvm_readable(sw);
296}
297
298static inline int nvm_read(struct tb_switch *sw, unsigned int address,
299 void *buf, size_t size)
300{
301 if (tb_switch_is_usb4(sw))
302 return usb4_switch_nvm_read(sw, address, buf, size);
303 return dma_port_flash_read(sw->dma_port, address, buf, size);
304}
305
1cbf680f 306static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
b0407983
MW
307{
308 int ret;
309
1cbf680f
MW
310 if (tb_switch_is_usb4(sw)) {
311 if (auth_only) {
312 ret = usb4_switch_nvm_set_offset(sw, 0);
313 if (ret)
314 return ret;
315 }
316 sw->nvm->authenticating = true;
b0407983 317 return usb4_switch_nvm_authenticate(sw);
1cbf680f
MW
318 } else if (auth_only) {
319 return -EOPNOTSUPP;
320 }
b0407983 321
1cbf680f 322 sw->nvm->authenticating = true;
b0407983
MW
323 if (!tb_route(sw)) {
324 nvm_authenticate_start_dma_port(sw);
325 ret = nvm_authenticate_host_dma_port(sw);
326 } else {
327 ret = nvm_authenticate_device_dma_port(sw);
328 }
329
330 return ret;
331}
332
e6b245cc
MW
333static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
334 size_t bytes)
335{
719a5fe8
MW
336 struct tb_nvm *nvm = priv;
337 struct tb_switch *sw = tb_to_switch(nvm->dev);
2d8ff0b5
MW
338 int ret;
339
340 pm_runtime_get_sync(&sw->dev);
4f7c2e0d
MW
341
342 if (!mutex_trylock(&sw->tb->lock)) {
343 ret = restart_syscall();
344 goto out;
345 }
346
b0407983 347 ret = nvm_read(sw, offset, val, bytes);
4f7c2e0d
MW
348 mutex_unlock(&sw->tb->lock);
349
350out:
2d8ff0b5
MW
351 pm_runtime_mark_last_busy(&sw->dev);
352 pm_runtime_put_autosuspend(&sw->dev);
e6b245cc 353
2d8ff0b5 354 return ret;
e6b245cc
MW
355}
356
357static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
358 size_t bytes)
359{
719a5fe8
MW
360 struct tb_nvm *nvm = priv;
361 struct tb_switch *sw = tb_to_switch(nvm->dev);
362 int ret;
e6b245cc 363
09f11b6c
MW
364 if (!mutex_trylock(&sw->tb->lock))
365 return restart_syscall();
e6b245cc
MW
366
367 /*
368 * Since writing the NVM image might require some special steps,
369 * for example when CSS headers are written, we cache the image
370 * locally here and handle the special cases when the user asks
371 * us to authenticate the image.
372 */
719a5fe8 373 ret = tb_nvm_write_buf(nvm, offset, val, bytes);
09f11b6c 374 mutex_unlock(&sw->tb->lock);
e6b245cc
MW
375
376 return ret;
377}
378
e6b245cc
MW
379static int tb_switch_nvm_add(struct tb_switch *sw)
380{
719a5fe8 381 struct tb_nvm *nvm;
e6b245cc
MW
382 u32 val;
383 int ret;
384
b0407983 385 if (!nvm_readable(sw))
e6b245cc
MW
386 return 0;
387
b0407983
MW
388 /*
389 * The NVM format of non-Intel hardware is not known so
 390 * currently restrict NVM upgrade to Intel hardware. We may
391 * relax this in the future when we learn other NVM formats.
392 */
83d17036
MW
393 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
394 sw->config.vendor_id != 0x8087) {
b0407983
MW
395 dev_info(&sw->dev,
396 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
397 sw->config.vendor_id);
398 return 0;
399 }
400
719a5fe8
MW
401 nvm = tb_nvm_alloc(&sw->dev);
402 if (IS_ERR(nvm))
403 return PTR_ERR(nvm);
e6b245cc
MW
404
405 /*
406 * If the switch is in safe-mode the only accessible portion of
407 * the NVM is the non-active one where userspace is expected to
408 * write new functional NVM.
409 */
410 if (!sw->safe_mode) {
411 u32 nvm_size, hdr_size;
412
b0407983 413 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
e6b245cc 414 if (ret)
719a5fe8 415 goto err_nvm;
e6b245cc
MW
416
417 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
418 nvm_size = (SZ_1M << (val & 7)) / 8;
419 nvm_size = (nvm_size - hdr_size) / 2;
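		/*
		 * Worked example (illustrative only): if (val & 7) == 4 the
		 * flash is (1 MiB << 4) / 8 = 2 MiB, so with a 16 KiB header
		 * each NVM region (active and non-active) is roughly 1 MiB.
		 */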
420
b0407983 421 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
e6b245cc 422 if (ret)
719a5fe8 423 goto err_nvm;
e6b245cc
MW
424
425 nvm->major = val >> 16;
426 nvm->minor = val >> 8;
427
719a5fe8
MW
428 ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
429 if (ret)
430 goto err_nvm;
e6b245cc
MW
431 }
432
3f415e5e 433 if (!sw->no_nvm_upgrade) {
719a5fe8
MW
434 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
435 tb_switch_nvm_write);
436 if (ret)
437 goto err_nvm;
e6b245cc 438 }
e6b245cc 439
e6b245cc 440 sw->nvm = nvm;
e6b245cc
MW
441 return 0;
442
719a5fe8
MW
443err_nvm:
444 tb_nvm_free(nvm);
e6b245cc
MW
445 return ret;
446}
447
448static void tb_switch_nvm_remove(struct tb_switch *sw)
449{
719a5fe8 450 struct tb_nvm *nvm;
e6b245cc 451
e6b245cc
MW
452 nvm = sw->nvm;
453 sw->nvm = NULL;
e6b245cc
MW
454
455 if (!nvm)
456 return;
457
458 /* Remove authentication status in case the switch is unplugged */
459 if (!nvm->authenticating)
460 nvm_clear_auth_status(sw);
461
719a5fe8 462 tb_nvm_free(nvm);
e6b245cc
MW
463}
464
a25c8b2f
AN
465/* port utility functions */
466
1c561e4e 467static const char *tb_port_type(const struct tb_regs_port_header *port)
a25c8b2f
AN
468{
469 switch (port->type >> 16) {
470 case 0:
471 switch ((u8) port->type) {
472 case 0:
473 return "Inactive";
474 case 1:
475 return "Port";
476 case 2:
477 return "NHI";
478 default:
479 return "unknown";
480 }
481 case 0x2:
482 return "Ethernet";
483 case 0x8:
484 return "SATA";
485 case 0xe:
486 return "DP/HDMI";
487 case 0x10:
488 return "PCIe";
489 case 0x20:
490 return "USB";
491 default:
492 return "unknown";
493 }
494}
495
56ad3aef 496static void tb_dump_port(struct tb *tb, const struct tb_port *port)
a25c8b2f 497{
56ad3aef
MW
498 const struct tb_regs_port_header *regs = &port->config;
499
daa5140f
MW
500 tb_dbg(tb,
501 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
56ad3aef
MW
502 regs->port_number, regs->vendor_id, regs->device_id,
503 regs->revision, regs->thunderbolt_version, tb_port_type(regs),
504 regs->type);
daa5140f 505 tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
56ad3aef
MW
506 regs->max_in_hop_id, regs->max_out_hop_id);
507 tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
508 tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
509 tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
510 port->ctl_credits);
a25c8b2f
AN
511}
512
9da672a4
AN
513/**
514 * tb_port_state() - get connectedness state of a port
5cc0df9c 515 * @port: the port to check
9da672a4
AN
516 *
517 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
518 *
519 * Return: Returns an enum tb_port_state on success or an error code on failure.
520 */
5cc0df9c 521int tb_port_state(struct tb_port *port)
9da672a4
AN
522{
523 struct tb_cap_phy phy;
524 int res;
525 if (port->cap_phy == 0) {
526 tb_port_WARN(port, "does not have a PHY\n");
527 return -EINVAL;
528 }
529 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
530 if (res)
531 return res;
532 return phy.state;
533}
534
535/**
536 * tb_wait_for_port() - wait for a port to become ready
5c6b471b
MW
537 * @port: Port to wait
538 * @wait_if_unplugged: Wait also when port is unplugged
9da672a4
AN
539 *
540 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
541 * wait_if_unplugged is set then we also wait if the port is in state
542 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
543 * switch resume). Otherwise we only wait if a device is registered but the link
544 * has not yet been established.
545 *
546 * Return: Returns an error code on failure. Returns 0 if the port is not
547 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
548 * if the port is connected and in state TB_PORT_UP.
549 */
550int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
551{
552 int retries = 10;
553 int state;
554 if (!port->cap_phy) {
555 tb_port_WARN(port, "does not have PHY\n");
556 return -EINVAL;
557 }
558 if (tb_is_upstream_port(port)) {
559 tb_port_WARN(port, "is the upstream port\n");
560 return -EINVAL;
561 }
562
563 while (retries--) {
564 state = tb_port_state(port);
565 if (state < 0)
566 return state;
567 if (state == TB_PORT_DISABLED) {
62efe699 568 tb_port_dbg(port, "is disabled (state: 0)\n");
9da672a4
AN
569 return 0;
570 }
571 if (state == TB_PORT_UNPLUGGED) {
572 if (wait_if_unplugged) {
573 /* used during resume */
62efe699
MW
574 tb_port_dbg(port,
575 "is unplugged (state: 7), retrying...\n");
9da672a4
AN
576 msleep(100);
577 continue;
578 }
62efe699 579 tb_port_dbg(port, "is unplugged (state: 7)\n");
9da672a4
AN
580 return 0;
581 }
582 if (state == TB_PORT_UP) {
62efe699 583 tb_port_dbg(port, "is connected, link is up (state: 2)\n");
9da672a4
AN
584 return 1;
585 }
586
587 /*
588 * After plug-in the state is TB_PORT_CONNECTING. Give it some
589 * time.
590 */
62efe699
MW
591 tb_port_dbg(port,
592 "is connected, link is not up (state: %d), retrying...\n",
593 state);
9da672a4
AN
594 msleep(100);
595 }
596 tb_port_warn(port,
597 "failed to reach state TB_PORT_UP. Ignoring port...\n");
598 return 0;
599}
600
520b6702
AN
601/**
602 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
5c6b471b
MW
603 * @port: Port to add/remove NFC credits
604 * @credits: Credits to add/remove
520b6702
AN
605 *
606 * Change the number of NFC credits allocated to @port by @credits. To remove
607 * NFC credits pass a negative amount of credits.
608 *
609 * Return: Returns 0 on success or an error code on failure.
610 */
611int tb_port_add_nfc_credits(struct tb_port *port, int credits)
612{
c5ee6feb
MW
613 u32 nfc_credits;
614
615 if (credits == 0 || port->sw->is_unplugged)
520b6702 616 return 0;
c5ee6feb 617
edfbd68b
MW
618 /*
619 * USB4 restricts programming NFC buffers to lane adapters only
620 * so skip other ports.
621 */
622 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
623 return 0;
624
8f57d478 625 nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
6cb27a04
MW
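	/* When removing credits, never remove more than the port currently has */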
626 if (credits < 0)
627 credits = max_t(int, -nfc_credits, credits);
628
c5ee6feb
MW
629 nfc_credits += credits;
630
8f57d478
MW
631 tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
632 port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
c5ee6feb 633
8f57d478 634 port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
c5ee6feb
MW
635 port->config.nfc_credits |= nfc_credits;
636
520b6702 637 return tb_port_write(port, &port->config.nfc_credits,
8f57d478 638 TB_CFG_PORT, ADP_CS_4, 1);
520b6702
AN
639}
640
641/**
642 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
5c6b471b
MW
643 * @port: Port whose counters to clear
644 * @counter: Counter index to clear
520b6702
AN
645 *
646 * Return: Returns 0 on success or an error code on failure.
647 */
648int tb_port_clear_counter(struct tb_port *port, int counter)
649{
650 u32 zero[3] = { 0, 0, 0 };
62efe699 651 tb_port_dbg(port, "clearing counter %d\n", counter);
520b6702
AN
652 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
653}
654
b0407983
MW
655/**
656 * tb_port_unlock() - Unlock downstream port
657 * @port: Port to unlock
658 *
659 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
660 * downstream router accessible for CM.
661 */
662int tb_port_unlock(struct tb_port *port)
663{
664 if (tb_switch_is_icm(port->sw))
665 return 0;
666 if (!tb_port_is_null(port))
667 return -EINVAL;
668 if (tb_switch_is_usb4(port->sw))
669 return usb4_port_unlock(port);
670 return 0;
671}
672
341d4518
MW
673static int __tb_port_enable(struct tb_port *port, bool enable)
674{
675 int ret;
676 u32 phy;
677
678 if (!tb_port_is_null(port))
679 return -EINVAL;
680
681 ret = tb_port_read(port, &phy, TB_CFG_PORT,
682 port->cap_phy + LANE_ADP_CS_1, 1);
683 if (ret)
684 return ret;
685
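	/* The LD bit disables the lane adapter; clear it to enable the lane */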
686 if (enable)
687 phy &= ~LANE_ADP_CS_1_LD;
688 else
689 phy |= LANE_ADP_CS_1_LD;
690
691 return tb_port_write(port, &phy, TB_CFG_PORT,
692 port->cap_phy + LANE_ADP_CS_1, 1);
693}
694
695/**
696 * tb_port_enable() - Enable lane adapter
697 * @port: Port to enable (can be %NULL)
698 *
 699 * This is used to enable a lane 0 or 1 adapter.
700 */
701int tb_port_enable(struct tb_port *port)
702{
703 return __tb_port_enable(port, true);
704}
705
706/**
707 * tb_port_disable() - Disable lane adapter
708 * @port: Port to disable (can be %NULL)
709 *
 710 * This is used to disable a lane 0 or 1 adapter.
711 */
712int tb_port_disable(struct tb_port *port)
713{
714 return __tb_port_enable(port, false);
715}
716
47ba5ae4 717/*
a25c8b2f
AN
718 * tb_init_port() - initialize a port
719 *
720 * This is a helper method for tb_switch_alloc. Does not check or initialize
721 * any downstream switches.
722 *
723 * Return: Returns 0 on success or an error code on failure.
724 */
343fcb8c 725static int tb_init_port(struct tb_port *port)
a25c8b2f
AN
726{
727 int res;
9da672a4 728 int cap;
343fcb8c 729
fb7a89ad
SM
730 INIT_LIST_HEAD(&port->list);
731
732 /* Control adapter does not have configuration space */
733 if (!port->port)
734 return 0;
735
a25c8b2f 736 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
d94dcbb1
MW
737 if (res) {
738 if (res == -ENODEV) {
739 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
740 port->port);
8824d19b 741 port->disabled = true;
d94dcbb1
MW
742 return 0;
743 }
a25c8b2f 744 return res;
d94dcbb1 745 }
a25c8b2f 746
9da672a4 747 /* Port 0 is the switch itself and has no PHY. */
fb7a89ad 748 if (port->config.type == TB_TYPE_PORT) {
da2da04b 749 cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
9da672a4
AN
750
751 if (cap > 0)
752 port->cap_phy = cap;
753 else
754 tb_port_WARN(port, "non switch port without a PHY\n");
b0407983
MW
755
756 cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
757 if (cap > 0)
758 port->cap_usb4 = cap;
56ad3aef
MW
759
760 /*
761 * USB4 ports the buffers allocated for the control path
762 * can be read from the path config space. Legacy
763 * devices we use hard-coded value.
764 */
765 if (tb_switch_is_usb4(port->sw)) {
766 struct tb_regs_hop hop;
767
768 if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
769 port->ctl_credits = hop.initial_credits;
770 }
771 if (!port->ctl_credits)
772 port->ctl_credits = 2;
773
fb7a89ad 774 } else {
56183c88
MW
775 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
776 if (cap > 0)
777 port->cap_adap = cap;
9da672a4
AN
778 }
779
56ad3aef
MW
780 port->total_credits =
781 (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
782 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
783
784 tb_dump_port(port->sw->tb, port);
a25c8b2f 785 return 0;
a25c8b2f
AN
786}
787
0b2863ac
MW
788static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
789 int max_hopid)
790{
791 int port_max_hopid;
792 struct ida *ida;
793
794 if (in) {
795 port_max_hopid = port->config.max_in_hop_id;
796 ida = &port->in_hopids;
797 } else {
798 port_max_hopid = port->config.max_out_hop_id;
799 ida = &port->out_hopids;
800 }
801
12676423
MW
802 /*
 803 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
804 * reserved.
805 */
a3cfebdc 806 if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
0b2863ac
MW
807 min_hopid = TB_PATH_MIN_HOPID;
808
809 if (max_hopid < 0 || max_hopid > port_max_hopid)
810 max_hopid = port_max_hopid;
811
812 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
813}
814
815/**
816 * tb_port_alloc_in_hopid() - Allocate input HopID from port
817 * @port: Port to allocate HopID for
818 * @min_hopid: Minimum acceptable input HopID
819 * @max_hopid: Maximum acceptable input HopID
820 *
821 * Return: HopID between @min_hopid and @max_hopid or negative errno in
822 * case of error.
823 */
824int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
825{
826 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
827}
828
829/**
830 * tb_port_alloc_out_hopid() - Allocate output HopID from port
831 * @port: Port to allocate HopID for
832 * @min_hopid: Minimum acceptable output HopID
833 * @max_hopid: Maximum acceptable output HopID
834 *
835 * Return: HopID between @min_hopid and @max_hopid or negative errno in
836 * case of error.
837 */
838int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
839{
840 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
841}
842
843/**
844 * tb_port_release_in_hopid() - Release allocated input HopID from port
845 * @port: Port whose HopID to release
846 * @hopid: HopID to release
847 */
848void tb_port_release_in_hopid(struct tb_port *port, int hopid)
849{
850 ida_simple_remove(&port->in_hopids, hopid);
851}
852
853/**
854 * tb_port_release_out_hopid() - Release allocated output HopID from port
855 * @port: Port whose HopID to release
856 * @hopid: HopID to release
857 */
858void tb_port_release_out_hopid(struct tb_port *port, int hopid)
859{
860 ida_simple_remove(&port->out_hopids, hopid);
861}
862
69eb79f7
MW
863static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
864 const struct tb_switch *sw)
865{
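	/*
	 * Route strings encode one hop per byte, so a parent at depth N
	 * shares its first N route bytes with every switch below it
	 * (e.g. at depth 1 the mask is 0xff and only the first hop is
	 * compared).
	 */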
866 u64 mask = (1ULL << parent->config.depth * 8) - 1;
867 return (tb_route(parent) & mask) == (tb_route(sw) & mask);
868}
869
fb19fac1
MW
870/**
871 * tb_next_port_on_path() - Return next port for given port on a path
872 * @start: Start port of the walk
873 * @end: End port of the walk
874 * @prev: Previous port (%NULL if this is the first)
875 *
876 * This function can be used to walk from one port to another if they
 877 * are connected through zero or more switches. If @prev is a dual
 878 * link port, the function follows that link and returns the other end
 879 * on that same link.
880 *
881 * If the @end port has been reached, return %NULL.
882 *
883 * Domain tb->lock must be held when this function is called.
884 */
885struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
886 struct tb_port *prev)
887{
888 struct tb_port *next;
889
890 if (!prev)
891 return start;
892
893 if (prev->sw == end->sw) {
894 if (prev == end)
895 return NULL;
896 return end;
897 }
898
69eb79f7
MW
899 if (tb_switch_is_reachable(prev->sw, end->sw)) {
900 next = tb_port_at(tb_route(end->sw), prev->sw);
901 /* Walk down the topology if next == prev */
fb19fac1 902 if (prev->remote &&
69eb79f7 903 (next == prev || next->dual_link_port == prev))
fb19fac1 904 next = prev->remote;
fb19fac1
MW
905 } else {
906 if (tb_is_upstream_port(prev)) {
907 next = prev->remote;
908 } else {
909 next = tb_upstream_port(prev->sw);
910 /*
911 * Keep the same link if prev and next are both
912 * dual link ports.
913 */
914 if (next->dual_link_port &&
915 next->link_nr != prev->link_nr) {
916 next = next->dual_link_port;
917 }
918 }
919 }
920
69eb79f7 921 return next != prev ? next : NULL;
fb19fac1
MW
922}
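
/*
 * A caller typically walks a path by starting with prev == NULL, for
 * example (illustrative sketch):
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)))
 *		...;
 */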
923
5b7b8c0a
MW
924/**
925 * tb_port_get_link_speed() - Get current link speed
926 * @port: Port to check (USB4 or CIO)
927 *
928 * Returns link speed in Gb/s or negative errno in case of failure.
929 */
930int tb_port_get_link_speed(struct tb_port *port)
91c0c120
MW
931{
932 u32 val, speed;
933 int ret;
934
935 if (!port->cap_phy)
936 return -EINVAL;
937
938 ret = tb_port_read(port, &val, TB_CFG_PORT,
939 port->cap_phy + LANE_ADP_CS_1, 1);
940 if (ret)
941 return ret;
942
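	/* Gen 3 lanes run at 20 Gb/s, Gen 2 lanes at 10 Gb/s */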
943 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
944 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
945 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
946}
947
4210d50f
IH
948/**
949 * tb_port_get_link_width() - Get current link width
950 * @port: Port to check (USB4 or CIO)
951 *
952 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
953 * or negative errno in case of failure.
954 */
955int tb_port_get_link_width(struct tb_port *port)
91c0c120
MW
956{
957 u32 val;
958 int ret;
959
960 if (!port->cap_phy)
961 return -EINVAL;
962
963 ret = tb_port_read(port, &val, TB_CFG_PORT,
964 port->cap_phy + LANE_ADP_CS_1, 1);
965 if (ret)
966 return ret;
967
968 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
969 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
970}
971
972static bool tb_port_is_width_supported(struct tb_port *port, int width)
973{
974 u32 phy, widths;
975 int ret;
976
977 if (!port->cap_phy)
978 return false;
979
980 ret = tb_port_read(port, &phy, TB_CFG_PORT,
981 port->cap_phy + LANE_ADP_CS_0, 1);
982 if (ret)
e9d0e751 983 return false;
91c0c120
MW
984
985 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
986 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
987
988 return !!(widths & width);
989}
990
991static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
992{
993 u32 val;
994 int ret;
995
996 if (!port->cap_phy)
997 return -EINVAL;
998
999 ret = tb_port_read(port, &val, TB_CFG_PORT,
1000 port->cap_phy + LANE_ADP_CS_1, 1);
1001 if (ret)
1002 return ret;
1003
1004 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
1005 switch (width) {
1006 case 1:
1007 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
1008 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1009 break;
1010 case 2:
1011 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
1012 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1013 break;
1014 default:
1015 return -EINVAL;
1016 }
1017
1018 val |= LANE_ADP_CS_1_LB;
1019
1020 return tb_port_write(port, &val, TB_CFG_PORT,
1021 port->cap_phy + LANE_ADP_CS_1, 1);
1022}
1023
5cc0df9c
IH
1024/**
1025 * tb_port_lane_bonding_enable() - Enable bonding on port
1026 * @port: port to enable
1027 *
e7051bea
MW
1028 * Enable bonding by setting the link width of the port and the other
1029 * port in case of dual link port. Does not wait for the link to
1030 * actually reach the bonded state so caller needs to call
1031 * tb_port_wait_for_link_width() before enabling any paths through the
1032 * link to make sure the link is in expected state.
5cc0df9c
IH
1033 *
1034 * Return: %0 in case of success and negative errno in case of error
1035 */
1036int tb_port_lane_bonding_enable(struct tb_port *port)
91c0c120
MW
1037{
1038 int ret;
1039
1040 /*
 1041 * Enable lane bonding for both links if not already enabled by,
 1042 * for example, the boot firmware.
1043 */
1044 ret = tb_port_get_link_width(port);
1045 if (ret == 1) {
1046 ret = tb_port_set_link_width(port, 2);
1047 if (ret)
1048 return ret;
1049 }
1050
1051 ret = tb_port_get_link_width(port->dual_link_port);
1052 if (ret == 1) {
1053 ret = tb_port_set_link_width(port->dual_link_port, 2);
1054 if (ret) {
1055 tb_port_set_link_width(port, 1);
1056 return ret;
1057 }
1058 }
1059
1060 port->bonded = true;
1061 port->dual_link_port->bonded = true;
1062
1063 return 0;
1064}
1065
5cc0df9c
IH
1066/**
1067 * tb_port_lane_bonding_disable() - Disable bonding on port
1068 * @port: port to disable
1069 *
1070 * Disable bonding by setting the link width of the port and the
1071 * other port in case of dual link port.
1072 *
1073 */
1074void tb_port_lane_bonding_disable(struct tb_port *port)
91c0c120
MW
1075{
1076 port->dual_link_port->bonded = false;
1077 port->bonded = false;
1078
1079 tb_port_set_link_width(port->dual_link_port, 1);
1080 tb_port_set_link_width(port, 1);
1081}
1082
e7051bea
MW
1083/**
1084 * tb_port_wait_for_link_width() - Wait until link reaches specific width
1085 * @port: Port to wait for
1086 * @width: Expected link width (%1 or %2)
1087 * @timeout_msec: Timeout in ms how long to wait
1088 *
1089 * Should be used after both ends of the link have been bonded (or
1090 * bonding has been disabled) to wait until the link actually reaches
1091 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
 1092 * within the given timeout, %0 if it was.
1093 */
1094int tb_port_wait_for_link_width(struct tb_port *port, int width,
1095 int timeout_msec)
1096{
1097 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1098 int ret;
1099
1100 do {
1101 ret = tb_port_get_link_width(port);
1102 if (ret < 0)
1103 return ret;
1104 else if (ret == width)
1105 return 0;
1106
1107 usleep_range(1000, 2000);
1108 } while (ktime_before(ktime_get(), timeout));
1109
1110 return -ETIMEDOUT;
1111}
1112
69fea377
MW
1113static int tb_port_do_update_credits(struct tb_port *port)
1114{
1115 u32 nfc_credits;
1116 int ret;
1117
1118 ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
1119 if (ret)
1120 return ret;
1121
1122 if (nfc_credits != port->config.nfc_credits) {
1123 u32 total;
1124
1125 total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
1126 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
1127
1128 tb_port_dbg(port, "total credits changed %u -> %u\n",
1129 port->total_credits, total);
1130
1131 port->config.nfc_credits = nfc_credits;
1132 port->total_credits = total;
1133 }
1134
1135 return 0;
1136}
1137
1138/**
1139 * tb_port_update_credits() - Re-read port total credits
1140 * @port: Port to update
1141 *
1142 * After the link is bonded (or bonding was disabled) the port total
1143 * credits may change, so this function needs to be called to re-read
 1144 * the credits. Also updates the second lane adapter.
1145 */
1146int tb_port_update_credits(struct tb_port *port)
1147{
1148 int ret;
1149
1150 ret = tb_port_do_update_credits(port);
1151 if (ret)
1152 return ret;
1153 return tb_port_do_update_credits(port->dual_link_port);
1154}
1155
fdb0887c
MW
1156static int tb_port_start_lane_initialization(struct tb_port *port)
1157{
1158 int ret;
1159
1160 if (tb_switch_is_usb4(port->sw))
1161 return 0;
1162
1163 ret = tb_lc_start_lane_initialization(port);
1164 return ret == -EINVAL ? 0 : ret;
1165}
1166
3fb10ea4
RM
1167/*
1168 * Returns true if the port had something (router, XDomain) connected
1169 * before suspend.
1170 */
1171static bool tb_port_resume(struct tb_port *port)
1172{
1173 bool has_remote = tb_port_has_remote(port);
1174
1175 if (port->usb4) {
1176 usb4_port_device_resume(port->usb4);
1177 } else if (!has_remote) {
1178 /*
1179 * For disconnected downstream lane adapters start lane
1180 * initialization now so we detect future connects.
1181 *
 1182 * For XDomain start the lane initialization now so the
1183 * link gets re-established.
1184 *
1185 * This is only needed for non-USB4 ports.
1186 */
1187 if (!tb_is_upstream_port(port) || port->xdomain)
1188 tb_port_start_lane_initialization(port);
1189 }
1190
1191 return has_remote || port->xdomain;
1192}
1193
e78db6f0
MW
1194/**
1195 * tb_port_is_enabled() - Is the adapter port enabled
1196 * @port: Port to check
1197 */
1198bool tb_port_is_enabled(struct tb_port *port)
1199{
1200 switch (port->config.type) {
1201 case TB_TYPE_PCIE_UP:
1202 case TB_TYPE_PCIE_DOWN:
1203 return tb_pci_port_is_enabled(port);
1204
4f807e47
MW
1205 case TB_TYPE_DP_HDMI_IN:
1206 case TB_TYPE_DP_HDMI_OUT:
1207 return tb_dp_port_is_enabled(port);
1208
e6f81858
RM
1209 case TB_TYPE_USB3_UP:
1210 case TB_TYPE_USB3_DOWN:
1211 return tb_usb3_port_is_enabled(port);
1212
e78db6f0
MW
1213 default:
1214 return false;
1215 }
1216}
1217
e6f81858
RM
1218/**
1219 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1220 * @port: USB3 adapter port to check
1221 */
1222bool tb_usb3_port_is_enabled(struct tb_port *port)
1223{
1224 u32 data;
1225
1226 if (tb_port_read(port, &data, TB_CFG_PORT,
1227 port->cap_adap + ADP_USB3_CS_0, 1))
1228 return false;
1229
1230 return !!(data & ADP_USB3_CS_0_PE);
1231}
1232
1233/**
1234 * tb_usb3_port_enable() - Enable USB3 adapter port
1235 * @port: USB3 adapter port to enable
1236 * @enable: Enable/disable the USB3 adapter
1237 */
1238int tb_usb3_port_enable(struct tb_port *port, bool enable)
1239{
1240 u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1241 : ADP_USB3_CS_0_V;
1242
1243 if (!port->cap_adap)
1244 return -ENXIO;
1245 return tb_port_write(port, &word, TB_CFG_PORT,
1246 port->cap_adap + ADP_USB3_CS_0, 1);
1247}
1248
0414bec5
MW
1249/**
1250 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1251 * @port: PCIe port to check
1252 */
1253bool tb_pci_port_is_enabled(struct tb_port *port)
1254{
1255 u32 data;
1256
778bfca3
MW
1257 if (tb_port_read(port, &data, TB_CFG_PORT,
1258 port->cap_adap + ADP_PCIE_CS_0, 1))
0414bec5
MW
1259 return false;
1260
778bfca3 1261 return !!(data & ADP_PCIE_CS_0_PE);
0414bec5
MW
1262}
1263
93f36ade
MW
1264/**
1265 * tb_pci_port_enable() - Enable PCIe adapter port
1266 * @port: PCIe port to enable
1267 * @enable: Enable/disable the PCIe adapter
1268 */
1269int tb_pci_port_enable(struct tb_port *port, bool enable)
1270{
778bfca3 1271 u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
93f36ade
MW
1272 if (!port->cap_adap)
1273 return -ENXIO;
778bfca3
MW
1274 return tb_port_write(port, &word, TB_CFG_PORT,
1275 port->cap_adap + ADP_PCIE_CS_0, 1);
93f36ade
MW
1276}
1277
4f807e47
MW
1278/**
1279 * tb_dp_port_hpd_is_active() - Is HPD already active
1280 * @port: DP out port to check
1281 *
 1282 * Checks if the DP OUT adapter port has the HDP bit already set.
1283 */
1284int tb_dp_port_hpd_is_active(struct tb_port *port)
1285{
1286 u32 data;
1287 int ret;
1288
98176380
MW
1289 ret = tb_port_read(port, &data, TB_CFG_PORT,
1290 port->cap_adap + ADP_DP_CS_2, 1);
4f807e47
MW
1291 if (ret)
1292 return ret;
1293
98176380 1294 return !!(data & ADP_DP_CS_2_HDP);
4f807e47
MW
1295}
1296
1297/**
1298 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1299 * @port: Port to clear HPD
1300 *
1301 * If the DP IN port has HDP set, this function can be used to clear it.
1302 */
1303int tb_dp_port_hpd_clear(struct tb_port *port)
1304{
1305 u32 data;
1306 int ret;
1307
98176380
MW
1308 ret = tb_port_read(port, &data, TB_CFG_PORT,
1309 port->cap_adap + ADP_DP_CS_3, 1);
4f807e47
MW
1310 if (ret)
1311 return ret;
1312
98176380
MW
1313 data |= ADP_DP_CS_3_HDPC;
1314 return tb_port_write(port, &data, TB_CFG_PORT,
1315 port->cap_adap + ADP_DP_CS_3, 1);
4f807e47
MW
1316}
1317
1318/**
1319 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1320 * @port: DP IN/OUT port to set hops
1321 * @video: Video Hop ID
1322 * @aux_tx: AUX TX Hop ID
1323 * @aux_rx: AUX RX Hop ID
1324 *
e5bb88e9
MW
1325 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
1326 * router DP adapters too but does not program the values as the fields
1327 * are read-only.
4f807e47
MW
1328 */
1329int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1330 unsigned int aux_tx, unsigned int aux_rx)
1331{
1332 u32 data[2];
1333 int ret;
1334
e5bb88e9
MW
1335 if (tb_switch_is_usb4(port->sw))
1336 return 0;
1337
98176380
MW
1338 ret = tb_port_read(port, data, TB_CFG_PORT,
1339 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1340 if (ret)
1341 return ret;
1342
98176380
MW
1343 data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
 1344 data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1345 data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
4f807e47 1346
98176380
MW
1347 data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1348 ADP_DP_CS_0_VIDEO_HOPID_MASK;
1349 data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1350 data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1351 ADP_DP_CS_1_AUX_RX_HOPID_MASK;
4f807e47 1352
98176380
MW
1353 return tb_port_write(port, data, TB_CFG_PORT,
1354 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1355}
1356
1357/**
1358 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1359 * @port: DP adapter port to check
1360 */
1361bool tb_dp_port_is_enabled(struct tb_port *port)
1362{
fd5c46b7 1363 u32 data[2];
4f807e47 1364
98176380 1365 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
fd5c46b7 1366 ARRAY_SIZE(data)))
4f807e47
MW
1367 return false;
1368
98176380 1369 return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
4f807e47
MW
1370}
1371
1372/**
1373 * tb_dp_port_enable() - Enables/disables DP paths of a port
1374 * @port: DP IN/OUT port
1375 * @enable: Enable/disable DP path
1376 *
1377 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1378 * calling this function.
1379 */
1380int tb_dp_port_enable(struct tb_port *port, bool enable)
1381{
fd5c46b7 1382 u32 data[2];
4f807e47
MW
1383 int ret;
1384
98176380
MW
1385 ret = tb_port_read(port, data, TB_CFG_PORT,
1386 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1387 if (ret)
1388 return ret;
1389
1390 if (enable)
98176380 1391 data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
4f807e47 1392 else
98176380 1393 data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
4f807e47 1394
98176380
MW
1395 return tb_port_write(port, data, TB_CFG_PORT,
1396 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1397}
1398
a25c8b2f
AN
1399/* switch utility functions */
1400
b0407983
MW
1401static const char *tb_switch_generation_name(const struct tb_switch *sw)
1402{
1403 switch (sw->generation) {
1404 case 1:
1405 return "Thunderbolt 1";
1406 case 2:
1407 return "Thunderbolt 2";
1408 case 3:
1409 return "Thunderbolt 3";
1410 case 4:
1411 return "USB4";
1412 default:
1413 return "Unknown";
1414 }
1415}
1416
1417static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
a25c8b2f 1418{
b0407983
MW
1419 const struct tb_regs_switch_header *regs = &sw->config;
1420
1421 tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1422 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1423 regs->revision, regs->thunderbolt_version);
1424 tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
daa5140f
MW
1425 tb_dbg(tb, " Config:\n");
1426 tb_dbg(tb,
a25c8b2f 1427 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
b0407983
MW
1428 regs->upstream_port_number, regs->depth,
1429 (((u64) regs->route_hi) << 32) | regs->route_lo,
1430 regs->enabled, regs->plug_events_delay);
daa5140f 1431 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
b0407983 1432 regs->__unknown1, regs->__unknown4);
a25c8b2f
AN
1433}
1434
23dd5bb4 1435/**
2c2a2327 1436 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
356b6c4e 1437 * @sw: Switch to reset
23dd5bb4
AN
1438 *
1439 * Return: Returns 0 on success or an error code on failure.
1440 */
356b6c4e 1441int tb_switch_reset(struct tb_switch *sw)
23dd5bb4
AN
1442{
1443 struct tb_cfg_result res;
356b6c4e
MW
1444
1445 if (sw->generation > 1)
1446 return 0;
1447
1448 tb_sw_dbg(sw, "resetting switch\n");
1449
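	/*
	 * Dwords 2-3 of the config header hold the route string
	 * (route_lo/route_hi), so re-write those before issuing the reset.
	 */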
1450 res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1451 TB_CFG_SWITCH, 2, 2);
23dd5bb4
AN
1452 if (res.err)
1453 return res.err;
bda83aec 1454 res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
23dd5bb4
AN
1455 if (res.err > 0)
1456 return -EIO;
1457 return res.err;
1458}
1459
1639664f
GF
1460/**
1461 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
1462 * @sw: Router to read the offset value from
1463 * @offset: Offset in the router config space to read from
1464 * @bit: Bit mask in the offset to wait for
1465 * @value: Value of the bits to wait for
1466 * @timeout_msec: Timeout in ms how long to wait
1467 *
 1468 * Wait until the specified bits in the given offset reach the specified value.
1469 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
1470 * within the given timeout or a negative errno in case of failure.
1471 */
1472int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
1473 u32 value, int timeout_msec)
1474{
1475 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1476
1477 do {
1478 u32 val;
1479 int ret;
1480
1481 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
1482 if (ret)
1483 return ret;
1484
1485 if ((val & bit) == value)
1486 return 0;
1487
1488 usleep_range(50, 100);
1489 } while (ktime_before(ktime_get(), timeout));
1490
1491 return -ETIMEDOUT;
1492}
1493
47ba5ae4 1494/*
ca389f71
AN
1495 * tb_plug_events_active() - enable/disable plug events on a switch
1496 *
1497 * Also configures a sane plug_events_delay of 255ms.
1498 *
1499 * Return: Returns 0 on success or an error code on failure.
1500 */
1501static int tb_plug_events_active(struct tb_switch *sw, bool active)
1502{
1503 u32 data;
1504 int res;
1505
5cb6ed31 1506 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
bfe778ac
MW
1507 return 0;
1508
ca389f71
AN
1509 sw->config.plug_events_delay = 0xff;
1510 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1511 if (res)
1512 return res;
1513
1514 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1515 if (res)
1516 return res;
1517
1518 if (active) {
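		/*
		 * 0xFFFFFF83 clears bits 2-6 of the plug events control
		 * field; bit 2 is set again below except for the listed
		 * legacy controllers.
		 */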
1519 data = data & 0xFFFFFF83;
1520 switch (sw->config.device_id) {
1d111406
LW
1521 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1522 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1523 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
ca389f71
AN
1524 break;
1525 default:
1526 data |= 4;
1527 }
1528 } else {
1529 data = data | 0x7c;
1530 }
1531 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1532 sw->cap_plug_events + 1, 1);
1533}
1534
f67cf491
MW
1535static ssize_t authorized_show(struct device *dev,
1536 struct device_attribute *attr,
1537 char *buf)
1538{
1539 struct tb_switch *sw = tb_to_switch(dev);
1540
1541 return sprintf(buf, "%u\n", sw->authorized);
1542}
1543
3da88be2
MW
1544static int disapprove_switch(struct device *dev, void *not_used)
1545{
1651d9e7 1546 char *envp[] = { "AUTHORIZED=0", NULL };
3da88be2
MW
1547 struct tb_switch *sw;
1548
1549 sw = tb_to_switch(dev);
1550 if (sw && sw->authorized) {
1551 int ret;
1552
1553 /* First children */
1554 ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1555 if (ret)
1556 return ret;
1557
1558 ret = tb_domain_disapprove_switch(sw->tb, sw);
1559 if (ret)
1560 return ret;
1561
1562 sw->authorized = 0;
1651d9e7 1563 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
3da88be2
MW
1564 }
1565
1566 return 0;
1567}
1568
f67cf491
MW
1569static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1570{
1651d9e7 1571 char envp_string[13];
f67cf491 1572 int ret = -EINVAL;
1651d9e7 1573 char *envp[] = { envp_string, NULL };
f67cf491 1574
09f11b6c
MW
1575 if (!mutex_trylock(&sw->tb->lock))
1576 return restart_syscall();
f67cf491 1577
3da88be2 1578 if (!!sw->authorized == !!val)
f67cf491
MW
1579 goto unlock;
1580
1581 switch (val) {
3da88be2
MW
1582 /* Disapprove switch */
1583 case 0:
1584 if (tb_route(sw)) {
1585 ret = disapprove_switch(&sw->dev, NULL);
1586 goto unlock;
1587 }
1588 break;
1589
f67cf491
MW
1590 /* Approve switch */
1591 case 1:
1592 if (sw->key)
1593 ret = tb_domain_approve_switch_key(sw->tb, sw);
1594 else
1595 ret = tb_domain_approve_switch(sw->tb, sw);
1596 break;
1597
1598 /* Challenge switch */
1599 case 2:
1600 if (sw->key)
1601 ret = tb_domain_challenge_switch_key(sw->tb, sw);
1602 break;
1603
1604 default:
1605 break;
1606 }
1607
1608 if (!ret) {
1609 sw->authorized = val;
1651d9e7
RJ
1610 /*
1611 * Notify status change to the userspace, informing the new
1612 * value of /sys/bus/thunderbolt/devices/.../authorized.
1613 */
1614 sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
1615 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
f67cf491
MW
1616 }
1617
1618unlock:
09f11b6c 1619 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1620 return ret;
1621}
1622
1623static ssize_t authorized_store(struct device *dev,
1624 struct device_attribute *attr,
1625 const char *buf, size_t count)
1626{
1627 struct tb_switch *sw = tb_to_switch(dev);
1628 unsigned int val;
1629 ssize_t ret;
1630
1631 ret = kstrtouint(buf, 0, &val);
1632 if (ret)
1633 return ret;
1634 if (val > 2)
1635 return -EINVAL;
1636
4f7c2e0d 1637 pm_runtime_get_sync(&sw->dev);
f67cf491 1638 ret = tb_switch_set_authorized(sw, val);
4f7c2e0d
MW
1639 pm_runtime_mark_last_busy(&sw->dev);
1640 pm_runtime_put_autosuspend(&sw->dev);
f67cf491
MW
1641
1642 return ret ? ret : count;
1643}
1644static DEVICE_ATTR_RW(authorized);
1645
14862ee3
YB
1646static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1647 char *buf)
1648{
1649 struct tb_switch *sw = tb_to_switch(dev);
1650
1651 return sprintf(buf, "%u\n", sw->boot);
1652}
1653static DEVICE_ATTR_RO(boot);
1654
bfe778ac
MW
1655static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1656 char *buf)
1657{
1658 struct tb_switch *sw = tb_to_switch(dev);
ca389f71 1659
bfe778ac
MW
1660 return sprintf(buf, "%#x\n", sw->device);
1661}
1662static DEVICE_ATTR_RO(device);
1663
72ee3390
MW
1664static ssize_t
1665device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1666{
1667 struct tb_switch *sw = tb_to_switch(dev);
1668
1669 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1670}
1671static DEVICE_ATTR_RO(device_name);
1672
b406357c
CK
1673static ssize_t
1674generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1675{
1676 struct tb_switch *sw = tb_to_switch(dev);
1677
1678 return sprintf(buf, "%u\n", sw->generation);
1679}
1680static DEVICE_ATTR_RO(generation);
1681
f67cf491
MW
1682static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1683 char *buf)
1684{
1685 struct tb_switch *sw = tb_to_switch(dev);
1686 ssize_t ret;
1687
09f11b6c
MW
1688 if (!mutex_trylock(&sw->tb->lock))
1689 return restart_syscall();
f67cf491
MW
1690
1691 if (sw->key)
1692 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1693 else
1694 ret = sprintf(buf, "\n");
1695
09f11b6c 1696 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1697 return ret;
1698}
1699
1700static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1701 const char *buf, size_t count)
1702{
1703 struct tb_switch *sw = tb_to_switch(dev);
1704 u8 key[TB_SWITCH_KEY_SIZE];
1705 ssize_t ret = count;
e545f0d8 1706 bool clear = false;
f67cf491 1707
e545f0d8
BY
1708 if (!strcmp(buf, "\n"))
1709 clear = true;
1710 else if (hex2bin(key, buf, sizeof(key)))
f67cf491
MW
1711 return -EINVAL;
1712
09f11b6c
MW
1713 if (!mutex_trylock(&sw->tb->lock))
1714 return restart_syscall();
f67cf491
MW
1715
1716 if (sw->authorized) {
1717 ret = -EBUSY;
1718 } else {
1719 kfree(sw->key);
e545f0d8
BY
1720 if (clear) {
1721 sw->key = NULL;
1722 } else {
1723 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1724 if (!sw->key)
1725 ret = -ENOMEM;
1726 }
f67cf491
MW
1727 }
1728
09f11b6c 1729 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1730 return ret;
1731}
0956e411 1732static DEVICE_ATTR(key, 0600, key_show, key_store);
f67cf491 1733
91c0c120
MW
1734static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1735 char *buf)
1736{
1737 struct tb_switch *sw = tb_to_switch(dev);
1738
1739 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1740}
1741
1742/*
1743 * Currently all lanes must run at the same speed but we expose here
1744 * both directions to allow possible asymmetric links in the future.
1745 */
1746static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1747static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1748
1749static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1750 char *buf)
1751{
1752 struct tb_switch *sw = tb_to_switch(dev);
1753
1754 return sprintf(buf, "%u\n", sw->link_width);
1755}
1756
1757/*
 1757 * Currently the link has the same number of lanes in both directions (1 or 2)
 1758 * but we expose them separately to allow possible asymmetric links in the future.
1760 */
1761static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1762static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1763
e6b245cc
MW
1764static ssize_t nvm_authenticate_show(struct device *dev,
1765 struct device_attribute *attr, char *buf)
1766{
1767 struct tb_switch *sw = tb_to_switch(dev);
1768 u32 status;
1769
1770 nvm_get_auth_status(sw, &status);
1771 return sprintf(buf, "%#x\n", status);
1772}
1773
1cb36293
ML
1774static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1775 bool disconnect)
e6b245cc
MW
1776{
1777 struct tb_switch *sw = tb_to_switch(dev);
1cbf680f 1778 int val, ret;
e6b245cc 1779
4f7c2e0d
MW
1780 pm_runtime_get_sync(&sw->dev);
1781
1782 if (!mutex_trylock(&sw->tb->lock)) {
1783 ret = restart_syscall();
1784 goto exit_rpm;
1785 }
e6b245cc
MW
1786
1787 /* If NVMem devices are not yet added */
1788 if (!sw->nvm) {
1789 ret = -EAGAIN;
1790 goto exit_unlock;
1791 }
1792
4b794f80 1793 ret = kstrtoint(buf, 10, &val);
e6b245cc
MW
1794 if (ret)
1795 goto exit_unlock;
1796
1797 /* Always clear the authentication status */
1798 nvm_clear_auth_status(sw);
1799
4b794f80 1800 if (val > 0) {
1cbf680f
MW
1801 if (val == AUTHENTICATE_ONLY) {
1802 if (disconnect)
4b794f80 1803 ret = -EINVAL;
1cbf680f
MW
1804 else
1805 ret = nvm_authenticate(sw, true);
1806 } else {
1807 if (!sw->nvm->flushed) {
1808 if (!sw->nvm->buf) {
1809 ret = -EINVAL;
1810 goto exit_unlock;
1811 }
1812
1813 ret = nvm_validate_and_write(sw);
1814 if (ret || val == WRITE_ONLY)
1815 goto exit_unlock;
4b794f80 1816 }
1cbf680f
MW
1817 if (val == WRITE_AND_AUTHENTICATE) {
1818 if (disconnect)
1819 ret = tb_lc_force_power(sw);
1820 else
1821 ret = nvm_authenticate(sw, false);
1cb36293 1822 }
4b794f80 1823 }
e6b245cc
MW
1824 }
1825
1826exit_unlock:
09f11b6c 1827 mutex_unlock(&sw->tb->lock);
4f7c2e0d
MW
1828exit_rpm:
1829 pm_runtime_mark_last_busy(&sw->dev);
1830 pm_runtime_put_autosuspend(&sw->dev);
e6b245cc 1831
1cb36293
ML
1832 return ret;
1833}
1834
1835static ssize_t nvm_authenticate_store(struct device *dev,
1836 struct device_attribute *attr, const char *buf, size_t count)
1837{
1838 int ret = nvm_authenticate_sysfs(dev, buf, false);
e6b245cc
MW
1839 if (ret)
1840 return ret;
1841 return count;
1842}
1843static DEVICE_ATTR_RW(nvm_authenticate);
1844
1cb36293
ML
1845static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1846 struct device_attribute *attr, char *buf)
1847{
1848 return nvm_authenticate_show(dev, attr, buf);
1849}
1850
1851static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
1852 struct device_attribute *attr, const char *buf, size_t count)
1853{
1854 int ret;
1855
1856 ret = nvm_authenticate_sysfs(dev, buf, true);
1857 return ret ? ret : count;
1858}
1859static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
1860
e6b245cc
MW
1861static ssize_t nvm_version_show(struct device *dev,
1862 struct device_attribute *attr, char *buf)
1863{
1864 struct tb_switch *sw = tb_to_switch(dev);
1865 int ret;
1866
09f11b6c
MW
1867 if (!mutex_trylock(&sw->tb->lock))
1868 return restart_syscall();
e6b245cc
MW
1869
1870 if (sw->safe_mode)
1871 ret = -ENODATA;
1872 else if (!sw->nvm)
1873 ret = -EAGAIN;
1874 else
1875 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1876
09f11b6c 1877 mutex_unlock(&sw->tb->lock);
e6b245cc
MW
1878
1879 return ret;
1880}
1881static DEVICE_ATTR_RO(nvm_version);
1882
bfe778ac
MW
1883static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1884 char *buf)
a25c8b2f 1885{
bfe778ac 1886 struct tb_switch *sw = tb_to_switch(dev);
a25c8b2f 1887
bfe778ac
MW
1888 return sprintf(buf, "%#x\n", sw->vendor);
1889}
1890static DEVICE_ATTR_RO(vendor);
1891
72ee3390
MW
1892static ssize_t
1893vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1894{
1895 struct tb_switch *sw = tb_to_switch(dev);
1896
1897 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1898}
1899static DEVICE_ATTR_RO(vendor_name);
1900
bfe778ac
MW
1901static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1902 char *buf)
1903{
1904 struct tb_switch *sw = tb_to_switch(dev);
1905
1906 return sprintf(buf, "%pUb\n", sw->uuid);
1907}
1908static DEVICE_ATTR_RO(unique_id);
1909
1910static struct attribute *switch_attrs[] = {
f67cf491 1911 &dev_attr_authorized.attr,
14862ee3 1912 &dev_attr_boot.attr,
bfe778ac 1913 &dev_attr_device.attr,
72ee3390 1914 &dev_attr_device_name.attr,
b406357c 1915 &dev_attr_generation.attr,
f67cf491 1916 &dev_attr_key.attr,
e6b245cc 1917 &dev_attr_nvm_authenticate.attr,
1cb36293 1918 &dev_attr_nvm_authenticate_on_disconnect.attr,
e6b245cc 1919 &dev_attr_nvm_version.attr,
91c0c120
MW
1920 &dev_attr_rx_speed.attr,
1921 &dev_attr_rx_lanes.attr,
1922 &dev_attr_tx_speed.attr,
1923 &dev_attr_tx_lanes.attr,
bfe778ac 1924 &dev_attr_vendor.attr,
72ee3390 1925 &dev_attr_vendor_name.attr,
bfe778ac
MW
1926 &dev_attr_unique_id.attr,
1927 NULL,
1928};
1929
f67cf491
MW
1930static umode_t switch_attr_is_visible(struct kobject *kobj,
1931 struct attribute *attr, int n)
1932{
fff15f23 1933 struct device *dev = kobj_to_dev(kobj);
f67cf491
MW
1934 struct tb_switch *sw = tb_to_switch(dev);
1935
3cd542e6
MW
1936 if (attr == &dev_attr_authorized.attr) {
1937 if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
8e334125 1938 sw->tb->security_level == TB_SECURITY_DPONLY)
3cd542e6
MW
1939 return 0;
1940 } else if (attr == &dev_attr_device.attr) {
58f414fa
MW
1941 if (!sw->device)
1942 return 0;
1943 } else if (attr == &dev_attr_device_name.attr) {
1944 if (!sw->device_name)
1945 return 0;
1946 } else if (attr == &dev_attr_vendor.attr) {
1947 if (!sw->vendor)
1948 return 0;
1949 } else if (attr == &dev_attr_vendor_name.attr) {
1950 if (!sw->vendor_name)
1951 return 0;
1952 } else if (attr == &dev_attr_key.attr) {
f67cf491
MW
1953 if (tb_route(sw) &&
1954 sw->tb->security_level == TB_SECURITY_SECURE &&
1955 sw->security_level == TB_SECURITY_SECURE)
1956 return attr->mode;
1957 return 0;
91c0c120
MW
1958 } else if (attr == &dev_attr_rx_speed.attr ||
1959 attr == &dev_attr_rx_lanes.attr ||
1960 attr == &dev_attr_tx_speed.attr ||
1961 attr == &dev_attr_tx_lanes.attr) {
1962 if (tb_route(sw))
1963 return attr->mode;
1964 return 0;
3f415e5e 1965 } else if (attr == &dev_attr_nvm_authenticate.attr) {
b0407983 1966 if (nvm_upgradeable(sw))
3f415e5e
MW
1967 return attr->mode;
1968 return 0;
1969 } else if (attr == &dev_attr_nvm_version.attr) {
b0407983 1970 if (nvm_readable(sw))
e6b245cc
MW
1971 return attr->mode;
1972 return 0;
14862ee3
YB
1973 } else if (attr == &dev_attr_boot.attr) {
1974 if (tb_route(sw))
1975 return attr->mode;
1976 return 0;
1cb36293
ML
1977 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
1978 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
1979 return attr->mode;
1980 return 0;
f67cf491
MW
1981 }
1982
e6b245cc 1983 return sw->safe_mode ? 0 : attr->mode;
f67cf491
MW
1984}
1985
6889e00f 1986static const struct attribute_group switch_group = {
f67cf491 1987 .is_visible = switch_attr_is_visible,
bfe778ac
MW
1988 .attrs = switch_attrs,
1989};
ca389f71 1990
bfe778ac
MW
1991static const struct attribute_group *switch_groups[] = {
1992 &switch_group,
1993 NULL,
1994};
1995
1996static void tb_switch_release(struct device *dev)
1997{
1998 struct tb_switch *sw = tb_to_switch(dev);
b433d010 1999 struct tb_port *port;
bfe778ac 2000
3e136768
MW
2001 dma_port_free(sw->dma_port);
2002
b433d010 2003 tb_switch_for_each_port(sw, port) {
781e14ea
MW
2004 ida_destroy(&port->in_hopids);
2005 ida_destroy(&port->out_hopids);
0b2863ac
MW
2006 }
2007
bfe778ac 2008 kfree(sw->uuid);
72ee3390
MW
2009 kfree(sw->device_name);
2010 kfree(sw->vendor_name);
a25c8b2f 2011 kfree(sw->ports);
343fcb8c 2012 kfree(sw->drom);
f67cf491 2013 kfree(sw->key);
a25c8b2f
AN
2014 kfree(sw);
2015}
2016
2f608ba1
MW
2017static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
2018{
2019 struct tb_switch *sw = tb_to_switch(dev);
2020 const char *type;
2021
2022 if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
2023 if (add_uevent_var(env, "USB4_VERSION=1.0"))
2024 return -ENOMEM;
2025 }
2026
2027 if (!tb_route(sw)) {
2028 type = "host";
2029 } else {
2030 const struct tb_port *port;
2031 bool hub = false;
2032
2033 /* Device is hub if it has any downstream ports */
2034 tb_switch_for_each_port(sw, port) {
2035 if (!port->disabled && !tb_is_upstream_port(port) &&
2036 tb_port_is_null(port)) {
2037 hub = true;
2038 break;
2039 }
2040 }
2041
2042 type = hub ? "hub" : "device";
2043 }
2044
2045 if (add_uevent_var(env, "USB4_TYPE=%s", type))
2046 return -ENOMEM;
2047 return 0;
2048}
2049
2d8ff0b5
MW
2050/*
2051 * Currently only need to provide the callbacks. Everything else is handled
2052 * in the connection manager.
2053 */
2054static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
2055{
4f7c2e0d
MW
2056 struct tb_switch *sw = tb_to_switch(dev);
2057 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2058
2059 if (cm_ops->runtime_suspend_switch)
2060 return cm_ops->runtime_suspend_switch(sw);
2061
2d8ff0b5
MW
2062 return 0;
2063}
2064
2065static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
2066{
4f7c2e0d
MW
2067 struct tb_switch *sw = tb_to_switch(dev);
2068 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2069
2070 if (cm_ops->runtime_resume_switch)
2071 return cm_ops->runtime_resume_switch(sw);
2d8ff0b5
MW
2072 return 0;
2073}
2074
2075static const struct dev_pm_ops tb_switch_pm_ops = {
2076 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
2077 NULL)
2078};
2079
bfe778ac
MW
2080struct device_type tb_switch_type = {
2081 .name = "thunderbolt_device",
2082 .release = tb_switch_release,
2f608ba1 2083 .uevent = tb_switch_uevent,
2d8ff0b5 2084 .pm = &tb_switch_pm_ops,
bfe778ac
MW
2085};
2086
2c3c4197
MW
2087static int tb_switch_get_generation(struct tb_switch *sw)
2088{
2089 switch (sw->config.device_id) {
2090 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2091 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2092 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2093 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2094 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2095 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2096 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2097 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2098 return 1;
2099
2100 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2101 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2102 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2103 return 2;
2104
2105 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2106 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2107 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2108 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2109 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
4bac471d
RM
2110 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2111 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2112 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
3cdb9446
MW
2113 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2114 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2c3c4197
MW
2115 return 3;
2116
2117 default:
b0407983
MW
2118 if (tb_switch_is_usb4(sw))
2119 return 4;
2120
2c3c4197
MW
2121 /*
2122 * For unknown switches assume generation to be 1 to be
2123 * on the safe side.
2124 */
2125 tb_sw_warn(sw, "unsupported switch device id %#x\n",
2126 sw->config.device_id);
2127 return 1;
2128 }
2129}
2130
b0407983
MW
2131static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2132{
2133 int max_depth;
2134
2135 if (tb_switch_is_usb4(sw) ||
2136 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2137 max_depth = USB4_SWITCH_MAX_DEPTH;
2138 else
2139 max_depth = TB_SWITCH_MAX_DEPTH;
2140
2141 return depth > max_depth;
2142}
2143
a25c8b2f 2144/**
bfe778ac
MW
2145 * tb_switch_alloc() - allocate a switch
2146 * @tb: Pointer to the owning domain
2147 * @parent: Parent device for this switch
2148 * @route: Route string for this switch
a25c8b2f 2149 *
bfe778ac
MW
2150 * Allocates and initializes a switch. Will not upload configuration to
2151 * the switch. For that you need to call tb_switch_configure()
2152 * separately. The returned switch should be released by calling
2153 * tb_switch_put().
2154 *
444ac384
MW
2155 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2156 * failure.
a25c8b2f 2157 */
bfe778ac
MW
2158struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2159 u64 route)
a25c8b2f 2160{
a25c8b2f 2161 struct tb_switch *sw;
f0342e75 2162 int upstream_port;
444ac384 2163 int i, ret, depth;
f0342e75 2164
b0407983
MW
2165 /* Unlock the downstream port so we can access the switch below */
2166 if (route) {
2167 struct tb_switch *parent_sw = tb_to_switch(parent);
2168 struct tb_port *down;
2169
2170 down = tb_port_at(route, parent_sw);
2171 tb_port_unlock(down);
2172 }
2173
f0342e75 2174 depth = tb_route_length(route);
f0342e75
MW
2175
2176 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
a25c8b2f 2177 if (upstream_port < 0)
444ac384 2178 return ERR_PTR(upstream_port);
a25c8b2f
AN
2179
2180 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2181 if (!sw)
444ac384 2182 return ERR_PTR(-ENOMEM);
a25c8b2f
AN
2183
2184 sw->tb = tb;
444ac384
MW
2185 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2186 if (ret)
bfe778ac
MW
2187 goto err_free_sw_ports;
2188
b0407983
MW
2189 sw->generation = tb_switch_get_generation(sw);
2190
daa5140f 2191 tb_dbg(tb, "current switch config:\n");
b0407983 2192 tb_dump_switch(tb, sw);
a25c8b2f
AN
2193
2194 /* configure switch */
2195 sw->config.upstream_port_number = upstream_port;
f0342e75
MW
2196 sw->config.depth = depth;
2197 sw->config.route_hi = upper_32_bits(route);
2198 sw->config.route_lo = lower_32_bits(route);
bfe778ac 2199 sw->config.enabled = 0;
a25c8b2f 2200
b0407983 2201 /* Make sure we do not exceed maximum topology limit */
704a940d
CIK
2202 if (tb_switch_exceeds_max_depth(sw, depth)) {
2203 ret = -EADDRNOTAVAIL;
2204 goto err_free_sw_ports;
2205 }
b0407983 2206
a25c8b2f
AN
2207 /* initialize ports */
2208 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
343fcb8c 2209 GFP_KERNEL);
444ac384
MW
2210 if (!sw->ports) {
2211 ret = -ENOMEM;
bfe778ac 2212 goto err_free_sw_ports;
444ac384 2213 }
a25c8b2f
AN
2214
2215 for (i = 0; i <= sw->config.max_port_number; i++) {
343fcb8c
AN
2216 /* minimum setup for tb_find_cap and tb_drom_read to work */
2217 sw->ports[i].sw = sw;
2218 sw->ports[i].port = i;
781e14ea
MW
2219
2220 /* Control port does not need HopID allocation */
2221 if (i) {
2222 ida_init(&sw->ports[i].in_hopids);
2223 ida_init(&sw->ports[i].out_hopids);
2224 }
a25c8b2f
AN
2225 }
2226
444ac384 2227 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
b0407983
MW
2228 if (ret > 0)
2229 sw->cap_plug_events = ret;
ca389f71 2230
23ccd21c
GF
2231 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2232 if (ret > 0)
2233 sw->cap_vsec_tmu = ret;
2234
444ac384
MW
2235 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2236 if (ret > 0)
2237 sw->cap_lc = ret;
a9be5582 2238
f67cf491
MW
2239 /* Root switch is always authorized */
2240 if (!route)
2241 sw->authorized = true;
2242
bfe778ac
MW
2243 device_initialize(&sw->dev);
2244 sw->dev.parent = parent;
2245 sw->dev.bus = &tb_bus_type;
2246 sw->dev.type = &tb_switch_type;
2247 sw->dev.groups = switch_groups;
2248 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2249
2250 return sw;
2251
2252err_free_sw_ports:
2253 kfree(sw->ports);
2254 kfree(sw);
2255
444ac384 2256 return ERR_PTR(ret);
bfe778ac
MW
2257}
2258
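/*
 * Illustrative sketch, not part of this file: the usual lifecycle of a
 * router as seen from a connection manager. tb_switch_configure(),
 * tb_switch_add() and tb_switch_put() are declared in tb.h;
 * tb_downstream_route() is assumed to be the tb.h helper that builds the
 * route string for a downstream port. Error reporting is elided.
 */
static void example_scan_port(struct tb_port *port)
{
	struct tb_switch *sw;

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw))
		return;

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Exposes the router to userspace; on failure drop the reference */
	if (tb_switch_add(sw))
		tb_switch_put(sw);
}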
e6b245cc
MW
2259/**
2260 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2261 * @tb: Pointer to the owning domain
2262 * @parent: Parent device for this switch
2263 * @route: Route string for this switch
2264 *
2265 * This creates a switch in safe mode. This means the switch pretty much
2266 * lacks all capabilities except DMA configuration port before it is
2267 * flashed with a valid NVM firmware.
2268 *
2269 * The returned switch must be released by calling tb_switch_put().
2270 *
444ac384 2271 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
e6b245cc
MW
2272 */
2273struct tb_switch *
2274tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2275{
2276 struct tb_switch *sw;
2277
2278 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2279 if (!sw)
444ac384 2280 return ERR_PTR(-ENOMEM);
e6b245cc
MW
2281
2282 sw->tb = tb;
2283 sw->config.depth = tb_route_length(route);
2284 sw->config.route_hi = upper_32_bits(route);
2285 sw->config.route_lo = lower_32_bits(route);
2286 sw->safe_mode = true;
2287
2288 device_initialize(&sw->dev);
2289 sw->dev.parent = parent;
2290 sw->dev.bus = &tb_bus_type;
2291 sw->dev.type = &tb_switch_type;
2292 sw->dev.groups = switch_groups;
2293 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2294
2295 return sw;
2296}
2297
bfe778ac
MW
2298/**
2299 * tb_switch_configure() - Uploads configuration to the switch
2300 * @sw: Switch to configure
2301 *
2302 * Call this function before the switch is added to the system. It will
 2303 * upload configuration to the switch and make it available for the
b0407983
MW
 2304 * connection manager to use. Can be called for the switch again after
2305 * resume from low power states to re-initialize it.
bfe778ac
MW
2306 *
2307 * Return: %0 in case of success and negative errno in case of failure
2308 */
2309int tb_switch_configure(struct tb_switch *sw)
2310{
2311 struct tb *tb = sw->tb;
2312 u64 route;
2313 int ret;
2314
2315 route = tb_route(sw);
bfe778ac 2316
b0407983 2317 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
b2911a59 2318 sw->config.enabled ? "restoring" : "initializing", route,
b0407983 2319 tb_route_length(route), sw->config.upstream_port_number);
bfe778ac 2320
bfe778ac
MW
2321 sw->config.enabled = 1;
2322
b0407983
MW
2323 if (tb_switch_is_usb4(sw)) {
2324 /*
2325 * For USB4 devices, we need to program the CM version
2326 * accordingly so that it knows to expose all the
2327 * additional capabilities.
2328 */
2329 sw->config.cmuv = USB4_VERSION_1_0;
2330
2331 /* Enumerate the switch */
2332 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2333 ROUTER_CS_1, 4);
2334 if (ret)
2335 return ret;
bfe778ac 2336
b0407983 2337 ret = usb4_switch_setup(sw);
b0407983
MW
2338 } else {
2339 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2340 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2341 sw->config.vendor_id);
2342
2343 if (!sw->cap_plug_events) {
2344 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2345 return -ENODEV;
2346 }
2347
2348 /* Enumerate the switch */
2349 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2350 ROUTER_CS_1, 3);
b0407983 2351 }
e879a709
MW
2352 if (ret)
2353 return ret;
2354
bfe778ac
MW
2355 return tb_plug_events_active(sw, true);
2356}
2357
2cc12751 2358static int tb_switch_set_uuid(struct tb_switch *sw)
bfe778ac 2359{
b0407983 2360 bool uid = false;
bfe778ac 2361 u32 uuid[4];
a9be5582 2362 int ret;
bfe778ac
MW
2363
2364 if (sw->uuid)
a9be5582 2365 return 0;
bfe778ac 2366
b0407983
MW
2367 if (tb_switch_is_usb4(sw)) {
2368 ret = usb4_switch_read_uid(sw, &sw->uid);
2369 if (ret)
2370 return ret;
2371 uid = true;
2372 } else {
2373 /*
2374 * The newer controllers include fused UUID as part of
2375 * link controller specific registers
2376 */
2377 ret = tb_lc_read_uuid(sw, uuid);
2378 if (ret) {
2379 if (ret != -EINVAL)
2380 return ret;
2381 uid = true;
2382 }
2383 }
2384
2385 if (uid) {
bfe778ac
MW
2386 /*
2387 * ICM generates UUID based on UID and fills the upper
2388 * two words with ones. This is not strictly following
2389 * UUID format but we want to be compatible with it so
2390 * we do the same here.
2391 */
2392 uuid[0] = sw->uid & 0xffffffff;
2393 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2394 uuid[2] = 0xffffffff;
2395 uuid[3] = 0xffffffff;
2396 }
2397
2398 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2cc12751 2399 if (!sw->uuid)
a9be5582
MW
2400 return -ENOMEM;
2401 return 0;
bfe778ac
MW
2402}
2403
e6b245cc 2404static int tb_switch_add_dma_port(struct tb_switch *sw)
3e136768 2405{
e6b245cc
MW
2406 u32 status;
2407 int ret;
2408
3e136768 2409 switch (sw->generation) {
3e136768
MW
2410 case 2:
2411 /* Only root switch can be upgraded */
2412 if (tb_route(sw))
e6b245cc 2413 return 0;
7a7ebfa8 2414
df561f66 2415 fallthrough;
7a7ebfa8 2416 case 3:
661b1947 2417 case 4:
7a7ebfa8
MW
2418 ret = tb_switch_set_uuid(sw);
2419 if (ret)
2420 return ret;
3e136768
MW
2421 break;
2422
2423 default:
e6b245cc
MW
2424 /*
2425 * DMA port is the only thing available when the switch
2426 * is in safe mode.
2427 */
2428 if (!sw->safe_mode)
2429 return 0;
2430 break;
3e136768
MW
2431 }
2432
661b1947
MW
2433 if (sw->no_nvm_upgrade)
2434 return 0;
2435
2436 if (tb_switch_is_usb4(sw)) {
2437 ret = usb4_switch_nvm_authenticate_status(sw, &status);
2438 if (ret)
2439 return ret;
2440
2441 if (status) {
2442 tb_sw_info(sw, "switch flash authentication failed\n");
2443 nvm_set_auth_status(sw, status);
2444 }
2445
2446 return 0;
2447 }
2448
3f415e5e 2449 /* Root switch DMA port requires running firmware */
f07a3608 2450 if (!tb_route(sw) && !tb_switch_is_icm(sw))
e6b245cc
MW
2451 return 0;
2452
3e136768 2453 sw->dma_port = dma_port_alloc(sw);
e6b245cc
MW
2454 if (!sw->dma_port)
2455 return 0;
2456
7a7ebfa8
MW
2457 /*
2458 * If there is status already set then authentication failed
2459 * when the dma_port_flash_update_auth() returned. Power cycling
 2460 * is not needed (it was done already) so the only thing we do here
2461 * is to unblock runtime PM of the root port.
2462 */
2463 nvm_get_auth_status(sw, &status);
2464 if (status) {
2465 if (!tb_route(sw))
b0407983 2466 nvm_authenticate_complete_dma_port(sw);
7a7ebfa8
MW
2467 return 0;
2468 }
2469
e6b245cc
MW
2470 /*
2471 * Check status of the previous flash authentication. If there
2472 * is one we need to power cycle the switch in any case to make
2473 * it functional again.
2474 */
2475 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2476 if (ret <= 0)
2477 return ret;
2478
1830b6ee
MW
2479 /* Now we can allow root port to suspend again */
2480 if (!tb_route(sw))
b0407983 2481 nvm_authenticate_complete_dma_port(sw);
1830b6ee 2482
e6b245cc
MW
2483 if (status) {
2484 tb_sw_info(sw, "switch flash authentication failed\n");
e6b245cc
MW
2485 nvm_set_auth_status(sw, status);
2486 }
2487
2488 tb_sw_info(sw, "power cycling the switch now\n");
2489 dma_port_power_cycle(sw->dma_port);
2490
2491 /*
 2492 * We return an error here which causes the switch adding to fail.
 2493 * The switch should appear back after the power cycle is complete.
2494 */
2495 return -ESHUTDOWN;
3e136768
MW
2496}
2497
0d46c08d
MW
2498static void tb_switch_default_link_ports(struct tb_switch *sw)
2499{
2500 int i;
2501
42716425 2502 for (i = 1; i <= sw->config.max_port_number; i++) {
0d46c08d
MW
2503 struct tb_port *port = &sw->ports[i];
2504 struct tb_port *subordinate;
2505
2506 if (!tb_port_is_null(port))
2507 continue;
2508
2509 /* Check for the subordinate port */
2510 if (i == sw->config.max_port_number ||
2511 !tb_port_is_null(&sw->ports[i + 1]))
2512 continue;
2513
2514 /* Link them if not already done so (by DROM) */
2515 subordinate = &sw->ports[i + 1];
2516 if (!port->dual_link_port && !subordinate->dual_link_port) {
2517 port->link_nr = 0;
2518 port->dual_link_port = subordinate;
2519 subordinate->link_nr = 1;
2520 subordinate->dual_link_port = port;
2521
2522 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2523 port->port, subordinate->port);
2524 }
2525 }
2526}
2527
91c0c120
MW
2528static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2529{
2530 const struct tb_port *up = tb_upstream_port(sw);
2531
2532 if (!up->dual_link_port || !up->dual_link_port->remote)
2533 return false;
2534
b0407983
MW
2535 if (tb_switch_is_usb4(sw))
2536 return usb4_switch_lane_bonding_possible(sw);
91c0c120
MW
2537 return tb_lc_lane_bonding_possible(sw);
2538}
2539
2540static int tb_switch_update_link_attributes(struct tb_switch *sw)
2541{
2542 struct tb_port *up;
2543 bool change = false;
2544 int ret;
2545
2546 if (!tb_route(sw) || tb_switch_is_icm(sw))
2547 return 0;
2548
2549 up = tb_upstream_port(sw);
2550
2551 ret = tb_port_get_link_speed(up);
2552 if (ret < 0)
2553 return ret;
2554 if (sw->link_speed != ret)
2555 change = true;
2556 sw->link_speed = ret;
2557
2558 ret = tb_port_get_link_width(up);
2559 if (ret < 0)
2560 return ret;
2561 if (sw->link_width != ret)
2562 change = true;
2563 sw->link_width = ret;
2564
2565 /* Notify userspace that there is possible link attribute change */
2566 if (device_is_registered(&sw->dev) && change)
2567 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2568
2569 return 0;
2570}
2571
2572/**
2573 * tb_switch_lane_bonding_enable() - Enable lane bonding
2574 * @sw: Switch to enable lane bonding
2575 *
2576 * Connection manager can call this function to enable lane bonding of a
2577 * switch. If conditions are correct and both switches support the feature,
 2578 * lanes are bonded. It is safe to call this for any switch.
2579 */
2580int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2581{
2582 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2583 struct tb_port *up, *down;
2584 u64 route = tb_route(sw);
2585 int ret;
2586
2587 if (!route)
2588 return 0;
2589
2590 if (!tb_switch_lane_bonding_possible(sw))
2591 return 0;
2592
2593 up = tb_upstream_port(sw);
2594 down = tb_port_at(route, parent);
2595
2596 if (!tb_port_is_width_supported(up, 2) ||
2597 !tb_port_is_width_supported(down, 2))
2598 return 0;
2599
2600 ret = tb_port_lane_bonding_enable(up);
2601 if (ret) {
2602 tb_port_warn(up, "failed to enable lane bonding\n");
2603 return ret;
2604 }
2605
2606 ret = tb_port_lane_bonding_enable(down);
2607 if (ret) {
2608 tb_port_warn(down, "failed to enable lane bonding\n");
2609 tb_port_lane_bonding_disable(up);
2610 return ret;
2611 }
2612
e7051bea
MW
2613 ret = tb_port_wait_for_link_width(down, 2, 100);
2614 if (ret) {
2615 tb_port_warn(down, "timeout enabling lane bonding\n");
2616 return ret;
2617 }
2618
69fea377
MW
2619 tb_port_update_credits(down);
2620 tb_port_update_credits(up);
91c0c120
MW
2621 tb_switch_update_link_attributes(sw);
2622
2623 tb_sw_dbg(sw, "lane bonding enabled\n");
2624 return ret;
2625}
2626
2627/**
2628 * tb_switch_lane_bonding_disable() - Disable lane bonding
2629 * @sw: Switch whose lane bonding to disable
2630 *
2631 * Disables lane bonding between @sw and parent. This can be called even
2632 * if lanes were not bonded originally.
2633 */
2634void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2635{
2636 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2637 struct tb_port *up, *down;
2638
2639 if (!tb_route(sw))
2640 return;
2641
2642 up = tb_upstream_port(sw);
2643 if (!up->bonded)
2644 return;
2645
2646 down = tb_port_at(tb_route(sw), parent);
2647
2648 tb_port_lane_bonding_disable(up);
2649 tb_port_lane_bonding_disable(down);
2650
e7051bea
MW
2651 /*
2652 * It is fine if we get other errors as the router might have
2653 * been unplugged.
2654 */
2655 if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2656 tb_sw_warn(sw, "timeout disabling lane bonding\n");
2657
69fea377
MW
2658 tb_port_update_credits(down);
2659 tb_port_update_credits(up);
91c0c120 2660 tb_switch_update_link_attributes(sw);
69fea377 2661
91c0c120
MW
2662 tb_sw_dbg(sw, "lane bonding disabled\n");
2663}
2664
de462039
MW
2665/**
2666 * tb_switch_configure_link() - Set link configured
2667 * @sw: Switch whose link is configured
2668 *
2669 * Sets the link upstream from @sw configured (from both ends) so that
2670 * it will not be disconnected when the domain exits sleep. Can be
2671 * called for any switch.
2672 *
2673 * It is recommended that this is called after lane bonding is enabled.
2674 *
2675 * Returns %0 on success and negative errno in case of error.
2676 */
2677int tb_switch_configure_link(struct tb_switch *sw)
2678{
e28178bf
MW
2679 struct tb_port *up, *down;
2680 int ret;
2681
de462039
MW
2682 if (!tb_route(sw) || tb_switch_is_icm(sw))
2683 return 0;
2684
e28178bf
MW
2685 up = tb_upstream_port(sw);
2686 if (tb_switch_is_usb4(up->sw))
2687 ret = usb4_port_configure(up);
2688 else
2689 ret = tb_lc_configure_port(up);
2690 if (ret)
2691 return ret;
2692
2693 down = up->remote;
2694 if (tb_switch_is_usb4(down->sw))
2695 return usb4_port_configure(down);
2696 return tb_lc_configure_port(down);
de462039
MW
2697}
2698
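/*
 * Illustrative sketch, not part of this file: a connection manager
 * typically enables lane bonding first and then marks the link
 * configured so that it survives sleep, as recommended above.
 */
static void example_bring_up_link(struct tb_switch *sw)
{
	/* A bonding failure is not fatal; the link simply stays single lane */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	if (tb_switch_configure_link(sw))
		tb_sw_warn(sw, "failed to configure link\n");
}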
2699/**
2700 * tb_switch_unconfigure_link() - Unconfigure link
2701 * @sw: Switch whose link is unconfigured
2702 *
2703 * Sets the link unconfigured so the @sw will be disconnected if the
 2704 * domain exits sleep.
2705 */
2706void tb_switch_unconfigure_link(struct tb_switch *sw)
2707{
e28178bf
MW
2708 struct tb_port *up, *down;
2709
de462039
MW
2710 if (sw->is_unplugged)
2711 return;
2712 if (!tb_route(sw) || tb_switch_is_icm(sw))
2713 return;
2714
e28178bf
MW
2715 up = tb_upstream_port(sw);
2716 if (tb_switch_is_usb4(up->sw))
2717 usb4_port_unconfigure(up);
2718 else
2719 tb_lc_unconfigure_port(up);
2720
2721 down = up->remote;
2722 if (tb_switch_is_usb4(down->sw))
2723 usb4_port_unconfigure(down);
de462039 2724 else
e28178bf 2725 tb_lc_unconfigure_port(down);
de462039
MW
2726}
2727
56ad3aef
MW
2728static void tb_switch_credits_init(struct tb_switch *sw)
2729{
2730 if (tb_switch_is_icm(sw))
2731 return;
2732 if (!tb_switch_is_usb4(sw))
2733 return;
2734 if (usb4_switch_credits_init(sw))
2735 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2736}
2737
bfe778ac
MW
2738/**
2739 * tb_switch_add() - Add a switch to the domain
2740 * @sw: Switch to add
2741 *
 2742 * This is the last step in adding a switch to the domain. It will read
 2743 * identification information from DROM and initialize ports so that
2744 * they can be used to connect other switches. The switch will be
2745 * exposed to the userspace when this function successfully returns. To
2746 * remove and release the switch, call tb_switch_remove().
2747 *
2748 * Return: %0 in case of success and negative errno in case of failure
2749 */
2750int tb_switch_add(struct tb_switch *sw)
2751{
2752 int i, ret;
2753
3e136768
MW
2754 /*
2755 * Initialize DMA control port now before we read DROM. Recent
2756 * host controllers have more complete DROM on NVM that includes
2757 * vendor and model identification strings which we then expose
2758 * to the userspace. NVM can be accessed through DMA
2759 * configuration based mailbox.
2760 */
e6b245cc 2761 ret = tb_switch_add_dma_port(sw);
af99f696
MW
2762 if (ret) {
2763 dev_err(&sw->dev, "failed to add DMA port\n");
f53e7676 2764 return ret;
af99f696 2765 }
343fcb8c 2766
e6b245cc 2767 if (!sw->safe_mode) {
56ad3aef
MW
2768 tb_switch_credits_init(sw);
2769
e6b245cc
MW
2770 /* read drom */
2771 ret = tb_drom_read(sw);
2772 if (ret) {
af99f696 2773 dev_err(&sw->dev, "reading DROM failed\n");
e6b245cc
MW
2774 return ret;
2775 }
daa5140f 2776 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
bfe778ac 2777
e23a5afd
MW
2778 tb_check_quirks(sw);
2779
2cc12751 2780 ret = tb_switch_set_uuid(sw);
af99f696
MW
2781 if (ret) {
2782 dev_err(&sw->dev, "failed to set UUID\n");
2cc12751 2783 return ret;
af99f696 2784 }
e6b245cc
MW
2785
2786 for (i = 0; i <= sw->config.max_port_number; i++) {
2787 if (sw->ports[i].disabled) {
daa5140f 2788 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
e6b245cc
MW
2789 continue;
2790 }
2791 ret = tb_init_port(&sw->ports[i]);
af99f696
MW
2792 if (ret) {
2793 dev_err(&sw->dev, "failed to initialize port %d\n", i);
e6b245cc 2794 return ret;
af99f696 2795 }
343fcb8c 2796 }
91c0c120 2797
0d46c08d
MW
2798 tb_switch_default_link_ports(sw);
2799
91c0c120
MW
2800 ret = tb_switch_update_link_attributes(sw);
2801 if (ret)
2802 return ret;
cf29b9af
RM
2803
2804 ret = tb_switch_tmu_init(sw);
2805 if (ret)
2806 return ret;
343fcb8c
AN
2807 }
2808
e6b245cc 2809 ret = device_add(&sw->dev);
af99f696
MW
2810 if (ret) {
2811 dev_err(&sw->dev, "failed to add device: %d\n", ret);
e6b245cc 2812 return ret;
af99f696 2813 }
e6b245cc 2814
a83bc4a5
MW
2815 if (tb_route(sw)) {
2816 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2817 sw->vendor, sw->device);
2818 if (sw->vendor_name && sw->device_name)
2819 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2820 sw->device_name);
2821 }
2822
cae5f515
MW
2823 ret = usb4_switch_add_ports(sw);
2824 if (ret) {
2825 dev_err(&sw->dev, "failed to add USB4 ports\n");
2826 goto err_del;
2827 }
2828
e6b245cc 2829 ret = tb_switch_nvm_add(sw);
2d8ff0b5 2830 if (ret) {
af99f696 2831 dev_err(&sw->dev, "failed to add NVM devices\n");
cae5f515 2832 goto err_ports;
2d8ff0b5 2833 }
e6b245cc 2834
b2911a59
MW
2835 /*
2836 * Thunderbolt routers do not generate wakeups themselves but
2837 * they forward wakeups from tunneled protocols, so enable it
2838 * here.
2839 */
2840 device_init_wakeup(&sw->dev, true);
2841
2d8ff0b5
MW
2842 pm_runtime_set_active(&sw->dev);
2843 if (sw->rpm) {
2844 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2845 pm_runtime_use_autosuspend(&sw->dev);
2846 pm_runtime_mark_last_busy(&sw->dev);
2847 pm_runtime_enable(&sw->dev);
2848 pm_request_autosuspend(&sw->dev);
2849 }
2850
54e41810 2851 tb_switch_debugfs_init(sw);
2d8ff0b5 2852 return 0;
cae5f515
MW
2853
2854err_ports:
2855 usb4_switch_remove_ports(sw);
2856err_del:
2857 device_del(&sw->dev);
2858
2859 return ret;
bfe778ac 2860}
c90553b3 2861
bfe778ac
MW
2862/**
2863 * tb_switch_remove() - Remove and release a switch
2864 * @sw: Switch to remove
2865 *
 2866 * This will remove the switch from the domain and release it once its
 2867 * reference count drops to zero. If there are switches connected below
2868 * this switch, they will be removed as well.
2869 */
2870void tb_switch_remove(struct tb_switch *sw)
2871{
b433d010 2872 struct tb_port *port;
ca389f71 2873
54e41810
GF
2874 tb_switch_debugfs_remove(sw);
2875
2d8ff0b5
MW
2876 if (sw->rpm) {
2877 pm_runtime_get_sync(&sw->dev);
2878 pm_runtime_disable(&sw->dev);
2879 }
2880
bfe778ac 2881 /* port 0 is the switch itself and never has a remote */
b433d010
MW
2882 tb_switch_for_each_port(sw, port) {
2883 if (tb_port_has_remote(port)) {
2884 tb_switch_remove(port->remote->sw);
2885 port->remote = NULL;
2886 } else if (port->xdomain) {
2887 tb_xdomain_remove(port->xdomain);
2888 port->xdomain = NULL;
dfe40ca4 2889 }
dacb1287
KK
2890
2891 /* Remove any downstream retimers */
2892 tb_retimer_remove_all(port);
bfe778ac
MW
2893 }
2894
2895 if (!sw->is_unplugged)
2896 tb_plug_events_active(sw, false);
b0407983 2897
e6b245cc 2898 tb_switch_nvm_remove(sw);
cae5f515 2899 usb4_switch_remove_ports(sw);
a83bc4a5
MW
2900
2901 if (tb_route(sw))
2902 dev_info(&sw->dev, "device disconnected\n");
bfe778ac 2903 device_unregister(&sw->dev);
a25c8b2f
AN
2904}
2905
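/*
 * Illustrative sketch, not part of this file: tearing down a router that
 * was unplugged from a downstream port, roughly as a connection manager
 * hotplug handler would do.
 */
static void example_handle_unplug(struct tb_port *port)
{
	if (tb_port_has_remote(port)) {
		tb_sw_set_unplugged(port->remote->sw);
		tb_switch_remove(port->remote->sw);
		port->remote = NULL;
	}
}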
053596d9 2906/**
aae20bb6 2907 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
5c6b471b 2908 * @sw: Router to mark unplugged
053596d9 2909 */
aae20bb6 2910void tb_sw_set_unplugged(struct tb_switch *sw)
053596d9 2911{
b433d010
MW
2912 struct tb_port *port;
2913
053596d9
AN
2914 if (sw == sw->tb->root_switch) {
2915 tb_sw_WARN(sw, "cannot unplug root switch\n");
2916 return;
2917 }
2918 if (sw->is_unplugged) {
2919 tb_sw_WARN(sw, "is_unplugged already set\n");
2920 return;
2921 }
2922 sw->is_unplugged = true;
b433d010
MW
2923 tb_switch_for_each_port(sw, port) {
2924 if (tb_port_has_remote(port))
2925 tb_sw_set_unplugged(port->remote->sw);
2926 else if (port->xdomain)
2927 port->xdomain->is_unplugged = true;
053596d9
AN
2928 }
2929}
2930
b2911a59
MW
2931static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
2932{
2933 if (flags)
2934 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
2935 else
2936 tb_sw_dbg(sw, "disabling wakeup\n");
2937
2938 if (tb_switch_is_usb4(sw))
2939 return usb4_switch_set_wake(sw, flags);
2940 return tb_lc_set_wake(sw, flags);
2941}
2942
23dd5bb4
AN
2943int tb_switch_resume(struct tb_switch *sw)
2944{
b433d010
MW
2945 struct tb_port *port;
2946 int err;
2947
daa5140f 2948 tb_sw_dbg(sw, "resuming switch\n");
23dd5bb4 2949
08a5e4ce
MW
2950 /*
2951 * Check for UID of the connected switches except for root
2952 * switch which we assume cannot be removed.
2953 */
2954 if (tb_route(sw)) {
2955 u64 uid;
2956
7ea4cd6b
MW
2957 /*
2958 * Check first that we can still read the switch config
2959 * space. It may be that there is now another domain
2960 * connected.
2961 */
2962 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
2963 if (err < 0) {
2964 tb_sw_info(sw, "switch not present anymore\n");
2965 return err;
2966 }
2967
b0407983
MW
2968 if (tb_switch_is_usb4(sw))
2969 err = usb4_switch_read_uid(sw, &uid);
2970 else
2971 err = tb_drom_read_uid_only(sw, &uid);
08a5e4ce
MW
2972 if (err) {
2973 tb_sw_warn(sw, "uid read failed\n");
2974 return err;
2975 }
2976 if (sw->uid != uid) {
2977 tb_sw_info(sw,
2978 "changed while suspended (uid %#llx -> %#llx)\n",
2979 sw->uid, uid);
2980 return -ENODEV;
2981 }
23dd5bb4
AN
2982 }
2983
b0407983 2984 err = tb_switch_configure(sw);
23dd5bb4
AN
2985 if (err)
2986 return err;
2987
b2911a59
MW
2988 /* Disable wakes */
2989 tb_switch_set_wake(sw, 0);
2990
8145c435
MW
2991 err = tb_switch_tmu_init(sw);
2992 if (err)
2993 return err;
2994
23dd5bb4 2995 /* check for surviving downstream switches */
b433d010 2996 tb_switch_for_each_port(sw, port) {
3fb10ea4
RM
2997 if (!tb_port_is_null(port))
2998 continue;
2999
3000 if (!tb_port_resume(port))
23dd5bb4 3001 continue;
dfe40ca4 3002
7ea4cd6b 3003 if (tb_wait_for_port(port, true) <= 0) {
23dd5bb4
AN
3004 tb_port_warn(port,
3005 "lost during suspend, disconnecting\n");
7ea4cd6b
MW
3006 if (tb_port_has_remote(port))
3007 tb_sw_set_unplugged(port->remote->sw);
3008 else if (port->xdomain)
3009 port->xdomain->is_unplugged = true;
3fb10ea4 3010 } else {
b0407983
MW
3011 /*
3012 * Always unlock the port so the downstream
3013 * switch/domain is accessible.
3014 */
3015 if (tb_port_unlock(port))
3016 tb_port_warn(port, "failed to unlock port\n");
3017 if (port->remote && tb_switch_resume(port->remote->sw)) {
7ea4cd6b
MW
3018 tb_port_warn(port,
3019 "lost during suspend, disconnecting\n");
3020 tb_sw_set_unplugged(port->remote->sw);
3021 }
23dd5bb4
AN
3022 }
3023 }
3024 return 0;
3025}
3026
6ac6faee
MW
3027/**
3028 * tb_switch_suspend() - Put a switch to sleep
3029 * @sw: Switch to suspend
3030 * @runtime: Is this runtime suspend or system sleep
3031 *
 3032 * Suspends the router and all its children. Enables wakes according
 3033 * to the value of @runtime and then sets the sleep bit for the router.
 3034 * If @sw is the host router, the domain is ready to go to sleep once
 3035 * this function returns.
3036 */
3037void tb_switch_suspend(struct tb_switch *sw, bool runtime)
23dd5bb4 3038{
b2911a59 3039 unsigned int flags = 0;
b433d010
MW
3040 struct tb_port *port;
3041 int err;
3042
6ac6faee
MW
3043 tb_sw_dbg(sw, "suspending switch\n");
3044
23dd5bb4
AN
3045 err = tb_plug_events_active(sw, false);
3046 if (err)
3047 return;
3048
b433d010
MW
3049 tb_switch_for_each_port(sw, port) {
3050 if (tb_port_has_remote(port))
6ac6faee 3051 tb_switch_suspend(port->remote->sw, runtime);
23dd5bb4 3052 }
5480dfc2 3053
6ac6faee
MW
3054 if (runtime) {
3055 /* Trigger wake when something is plugged in/out */
3056 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
6026b703
MW
3057 flags |= TB_WAKE_ON_USB4;
3058 flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
6ac6faee
MW
3059 } else if (device_may_wakeup(&sw->dev)) {
3060 flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3061 }
b2911a59
MW
3062
3063 tb_switch_set_wake(sw, flags);
3064
b0407983
MW
3065 if (tb_switch_is_usb4(sw))
3066 usb4_switch_set_sleep(sw);
3067 else
3068 tb_lc_set_sleep(sw);
23dd5bb4 3069}
f67cf491 3070
8afe909b
MW
3071/**
3072 * tb_switch_query_dp_resource() - Query availability of DP resource
3073 * @sw: Switch whose DP resource is queried
3074 * @in: DP IN port
3075 *
3076 * Queries availability of DP resource for DP tunneling using switch
3077 * specific means. Returns %true if resource is available.
3078 */
3079bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3080{
b0407983
MW
3081 if (tb_switch_is_usb4(sw))
3082 return usb4_switch_query_dp_resource(sw, in);
8afe909b
MW
3083 return tb_lc_dp_sink_query(sw, in);
3084}
3085
3086/**
3087 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3088 * @sw: Switch whose DP resource is allocated
3089 * @in: DP IN port
3090 *
3091 * Allocates DP resource for DP tunneling. The resource must be
3092 * available for this to succeed (see tb_switch_query_dp_resource()).
3093 * Returns %0 in success and negative errno otherwise.
3094 */
3095int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3096{
ce05b997
MW
3097 int ret;
3098
b0407983 3099 if (tb_switch_is_usb4(sw))
ce05b997
MW
3100 ret = usb4_switch_alloc_dp_resource(sw, in);
3101 else
3102 ret = tb_lc_dp_sink_alloc(sw, in);
3103
3104 if (ret)
3105 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3106 in->port);
3107 else
3108 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3109
3110 return ret;
8afe909b
MW
3111}
3112
3113/**
3114 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3115 * @sw: Switch whose DP resource is de-allocated
3116 * @in: DP IN port
3117 *
3118 * De-allocates DP resource that was previously allocated for DP
3119 * tunneling.
3120 */
3121void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3122{
b0407983
MW
3123 int ret;
3124
3125 if (tb_switch_is_usb4(sw))
3126 ret = usb4_switch_dealloc_dp_resource(sw, in);
3127 else
3128 ret = tb_lc_dp_sink_dealloc(sw, in);
3129
3130 if (ret)
8afe909b
MW
3131 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3132 in->port);
ce05b997
MW
3133 else
3134 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
8afe909b
MW
3135}
3136
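/*
 * Illustrative sketch, not part of this file: the expected usage pattern
 * of the DP resource helpers above when setting up a DP tunnel. The
 * tunnel setup itself is elided.
 */
static int example_use_dp_sink(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;	/* sink is already in use */

	ret = tb_switch_alloc_dp_resource(sw, in);
	if (ret)
		return ret;

	/* ... the DP tunnel would be set up and used here ... */

	/* Release the sink once the tunnel is torn down */
	tb_switch_dealloc_dp_resource(sw, in);
	return 0;
}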
f67cf491
MW
3137struct tb_sw_lookup {
3138 struct tb *tb;
3139 u8 link;
3140 u8 depth;
7c39ffe7 3141 const uuid_t *uuid;
8e9267bb 3142 u64 route;
f67cf491
MW
3143};
3144
418e3ea1 3145static int tb_switch_match(struct device *dev, const void *data)
f67cf491
MW
3146{
3147 struct tb_switch *sw = tb_to_switch(dev);
418e3ea1 3148 const struct tb_sw_lookup *lookup = data;
f67cf491
MW
3149
3150 if (!sw)
3151 return 0;
3152 if (sw->tb != lookup->tb)
3153 return 0;
3154
3155 if (lookup->uuid)
3156 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3157
8e9267bb
RM
3158 if (lookup->route) {
3159 return sw->config.route_lo == lower_32_bits(lookup->route) &&
3160 sw->config.route_hi == upper_32_bits(lookup->route);
3161 }
3162
f67cf491
MW
3163 /* Root switch is matched only by depth */
3164 if (!lookup->depth)
3165 return !sw->depth;
3166
3167 return sw->link == lookup->link && sw->depth == lookup->depth;
3168}
3169
3170/**
3171 * tb_switch_find_by_link_depth() - Find switch by link and depth
3172 * @tb: Domain the switch belongs
3173 * @link: Link number the switch is connected
3174 * @depth: Depth of the switch in link
3175 *
3176 * Returned switch has reference count increased so the caller needs to
3177 * call tb_switch_put() when done with the switch.
3178 */
3179struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3180{
3181 struct tb_sw_lookup lookup;
3182 struct device *dev;
3183
3184 memset(&lookup, 0, sizeof(lookup));
3185 lookup.tb = tb;
3186 lookup.link = link;
3187 lookup.depth = depth;
3188
3189 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3190 if (dev)
3191 return tb_to_switch(dev);
3192
3193 return NULL;
3194}
3195
3196/**
432019d6 3197 * tb_switch_find_by_uuid() - Find switch by UUID
f67cf491
MW
3198 * @tb: Domain the switch belongs
3199 * @uuid: UUID to look for
3200 *
3201 * Returned switch has reference count increased so the caller needs to
3202 * call tb_switch_put() when done with the switch.
3203 */
7c39ffe7 3204struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
f67cf491
MW
3205{
3206 struct tb_sw_lookup lookup;
3207 struct device *dev;
3208
3209 memset(&lookup, 0, sizeof(lookup));
3210 lookup.tb = tb;
3211 lookup.uuid = uuid;
3212
3213 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3214 if (dev)
3215 return tb_to_switch(dev);
3216
3217 return NULL;
3218}
e6b245cc 3219
8e9267bb
RM
3220/**
3221 * tb_switch_find_by_route() - Find switch by route string
3222 * @tb: Domain the switch belongs
3223 * @route: Route string to look for
3224 *
3225 * Returned switch has reference count increased so the caller needs to
3226 * call tb_switch_put() when done with the switch.
3227 */
3228struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
3229{
3230 struct tb_sw_lookup lookup;
3231 struct device *dev;
3232
3233 if (!route)
3234 return tb_switch_get(tb->root_switch);
3235
3236 memset(&lookup, 0, sizeof(lookup));
3237 lookup.tb = tb;
3238 lookup.route = route;
3239
3240 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3241 if (dev)
3242 return tb_to_switch(dev);
3243
3244 return NULL;
3245}
3246
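/*
 * Illustrative sketch, not part of this file: the reference taken by the
 * lookup helpers above must be dropped with tb_switch_put() (declared in
 * tb.h) once the caller is done with the router.
 */
static void example_log_router(struct tb *tb, u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_route(tb, route);
	if (!sw)
		return;

	tb_sw_dbg(sw, "found generation %d router\n", sw->generation);
	tb_switch_put(sw);
}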
386e5e29
MW
3247/**
3248 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3249 * @sw: Switch to find the port from
3250 * @type: Port type to look for
3251 */
3252struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3253 enum tb_port_type type)
3254{
3255 struct tb_port *port;
3256
3257 tb_switch_for_each_port(sw, port) {
3258 if (port->config.type == type)
3259 return port;
3260 }
3261
3262 return NULL;
3263}
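
/*
 * Illustrative sketch, not part of this file: using the helper above to
 * check whether a router has a DP IN adapter before attempting DP
 * tunneling (TB_TYPE_DP_HDMI_IN comes from tb_regs.h).
 */
static bool example_has_dp_in(struct tb_switch *sw)
{
	return tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN) != NULL;
}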
8a90e4fa
GF
3264
3265static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
3266{
3267 u32 phy;
3268 int ret;
3269
3270 ret = tb_port_read(port, &phy, TB_CFG_PORT,
3271 port->cap_phy + LANE_ADP_CS_1, 1);
3272 if (ret)
3273 return ret;
3274
3275 if (secondary)
3276 phy |= LANE_ADP_CS_1_PMS;
3277 else
3278 phy &= ~LANE_ADP_CS_1_PMS;
3279
3280 return tb_port_write(port, &phy, TB_CFG_PORT,
3281 port->cap_phy + LANE_ADP_CS_1, 1);
3282}
3283
3284static int tb_port_pm_secondary_enable(struct tb_port *port)
3285{
3286 return __tb_port_pm_secondary_set(port, true);
3287}
3288
3289static int tb_port_pm_secondary_disable(struct tb_port *port)
3290{
3291 return __tb_port_pm_secondary_set(port, false);
3292}
3293
3294static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
3295{
3296 struct tb_switch *parent = tb_switch_parent(sw);
3297 struct tb_port *up, *down;
3298 int ret;
3299
3300 if (!tb_route(sw))
3301 return 0;
3302
3303 up = tb_upstream_port(sw);
3304 down = tb_port_at(tb_route(sw), parent);
3305 ret = tb_port_pm_secondary_enable(up);
3306 if (ret)
3307 return ret;
3308
3309 return tb_port_pm_secondary_disable(down);
3310}
3311
3312static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
3313{
3314 u32 mask, val;
3315 bool ret;
3316
3317 /* Don't enable CLx in case of two single-lane links */
3318 if (!port->bonded && port->dual_link_port)
3319 return false;
3320
3321 /* Don't enable CLx in case of inter-domain link */
3322 if (port->xdomain)
3323 return false;
3324
3325 if (!usb4_port_clx_supported(port))
3326 return false;
3327
3328 switch (clx) {
3329 case TB_CL0S:
 3330 /* CL0s support also requires CL1 support */
3331 mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
3332 break;
3333
3334 /* For now we support only CL0s. Not CL1, CL2 */
3335 case TB_CL1:
3336 case TB_CL2:
3337 default:
3338 return false;
3339 }
3340
3341 ret = tb_port_read(port, &val, TB_CFG_PORT,
3342 port->cap_phy + LANE_ADP_CS_0, 1);
3343 if (ret)
3344 return false;
3345
3346 return !!(val & mask);
3347}
3348
3349static inline bool tb_port_cl0s_supported(struct tb_port *port)
3350{
3351 return tb_port_clx_supported(port, TB_CL0S);
3352}
3353
3354static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
3355{
3356 u32 phy, mask;
3357 int ret;
3358
 3359 /* Enabling CL0s also requires enabling CL1 */
3360 mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
3361 ret = tb_port_read(port, &phy, TB_CFG_PORT,
3362 port->cap_phy + LANE_ADP_CS_1, 1);
3363 if (ret)
3364 return ret;
3365
3366 if (enable)
3367 phy |= mask;
3368 else
3369 phy &= ~mask;
3370
3371 return tb_port_write(port, &phy, TB_CFG_PORT,
3372 port->cap_phy + LANE_ADP_CS_1, 1);
3373}
3374
3375static int tb_port_cl0s_disable(struct tb_port *port)
3376{
3377 return __tb_port_cl0s_set(port, false);
3378}
3379
3380static int tb_port_cl0s_enable(struct tb_port *port)
3381{
3382 return __tb_port_cl0s_set(port, true);
3383}
3384
3385static int tb_switch_enable_cl0s(struct tb_switch *sw)
3386{
3387 struct tb_switch *parent = tb_switch_parent(sw);
3388 bool up_cl0s_support, down_cl0s_support;
3389 struct tb_port *up, *down;
3390 int ret;
3391
3392 if (!tb_switch_is_usb4(sw))
3393 return 0;
3394
3395 /*
3396 * Enable CLx for host router's downstream port as part of the
3397 * downstream router enabling procedure.
3398 */
3399 if (!tb_route(sw))
3400 return 0;
3401
3402 /* Enable CLx only for first hop router (depth = 1) */
3403 if (tb_route(parent))
3404 return 0;
3405
3406 ret = tb_switch_pm_secondary_resolve(sw);
3407 if (ret)
3408 return ret;
3409
3410 up = tb_upstream_port(sw);
3411 down = tb_port_at(tb_route(sw), parent);
3412
3413 up_cl0s_support = tb_port_cl0s_supported(up);
3414 down_cl0s_support = tb_port_cl0s_supported(down);
3415
3416 tb_port_dbg(up, "CL0s %ssupported\n",
3417 up_cl0s_support ? "" : "not ");
3418 tb_port_dbg(down, "CL0s %ssupported\n",
3419 down_cl0s_support ? "" : "not ");
3420
3421 if (!up_cl0s_support || !down_cl0s_support)
3422 return -EOPNOTSUPP;
3423
3424 ret = tb_port_cl0s_enable(up);
3425 if (ret)
3426 return ret;
3427
3428 ret = tb_port_cl0s_enable(down);
3429 if (ret) {
3430 tb_port_cl0s_disable(up);
3431 return ret;
3432 }
3433
3434 sw->clx = TB_CL0S;
3435
3436 tb_port_dbg(up, "CL0s enabled\n");
3437 return 0;
3438}
3439
3440/**
3441 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
3442 * @sw: Router to enable CLx for
3443 * @clx: The CLx state to enable
3444 *
3445 * Enable CLx state only for first hop router. That is the most common
 3446 * use-case; it is intended for better thermal management and so helps
3447 * to improve performance. CLx is enabled only if both sides of the link
3448 * support CLx, and if both sides of the link are not configured as two
 3449 * single-lane links, and only if the link is not an inter-domain link. The
 3450 * complete set of conditions is described in CM Guide 1.0 section 8.1.
3451 *
3452 * Return: Returns 0 on success or an error code on failure.
3453 */
3454int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
3455{
3456 struct tb_switch *root_sw = sw->tb->root_switch;
3457
3458 /*
3459 * CLx is not enabled and validated on Intel USB4 platforms before
3460 * Alder Lake.
3461 */
3462 if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
3463 return 0;
3464
3465 switch (clx) {
3466 case TB_CL0S:
3467 return tb_switch_enable_cl0s(sw);
3468
3469 default:
3470 return -EOPNOTSUPP;
3471 }
3472}
3473
3474static int tb_switch_disable_cl0s(struct tb_switch *sw)
3475{
3476 struct tb_switch *parent = tb_switch_parent(sw);
3477 struct tb_port *up, *down;
3478 int ret;
3479
3480 if (!tb_switch_is_usb4(sw))
3481 return 0;
3482
3483 /*
3484 * Disable CLx for host router's downstream port as part of the
 3485 * downstream router disabling procedure.
3486 */
3487 if (!tb_route(sw))
3488 return 0;
3489
3490 /* Disable CLx only for first hop router (depth = 1) */
3491 if (tb_route(parent))
3492 return 0;
3493
3494 up = tb_upstream_port(sw);
3495 down = tb_port_at(tb_route(sw), parent);
3496 ret = tb_port_cl0s_disable(up);
3497 if (ret)
3498 return ret;
3499
3500 ret = tb_port_cl0s_disable(down);
3501 if (ret)
3502 return ret;
3503
3504 sw->clx = TB_CLX_DISABLE;
3505
3506 tb_port_dbg(up, "CL0s disabled\n");
3507 return 0;
3508}
3509
3510/**
3511 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
3512 * @sw: Router to disable CLx for
3513 * @clx: The CLx state to disable
3514 *
3515 * Return: Returns 0 on success or an error code on failure.
3516 */
3517int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
3518{
3519 switch (clx) {
3520 case TB_CL0S:
3521 return tb_switch_disable_cl0s(sw);
3522
3523 default:
3524 return -EOPNOTSUPP;
3525 }
3526}
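
/*
 * Illustrative sketch, not part of this file: pairing the enable/disable
 * helpers above around a first hop router. Failures are treated as
 * non-fatal because CLx is an optional power optimization.
 */
static void example_toggle_cl0s(struct tb_switch *sw)
{
	if (tb_switch_enable_clx(sw, TB_CL0S))
		tb_sw_dbg(sw, "CL0s not enabled\n");

	/* ... normal operation ... */

	if (tb_switch_disable_clx(sw, TB_CL0S))
		tb_sw_warn(sw, "failed to disable CL0s\n");
}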