thunderbolt: Move port CL state functions into correct place in switch.c
[linux-block.git] / drivers/thunderbolt/switch.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
a25c8b2f 2/*
15c6784c 3 * Thunderbolt driver - switch/port utility functions
a25c8b2f
AN
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
15c6784c 6 * Copyright (C) 2018, Intel Corporation
a25c8b2f
AN
7 */
8
9#include <linux/delay.h>
e6b245cc
MW
10#include <linux/idr.h>
11#include <linux/nvmem-provider.h>
2d8ff0b5 12#include <linux/pm_runtime.h>
09f11b6c 13#include <linux/sched/signal.h>
e6b245cc 14#include <linux/sizes.h>
10fefe56 15#include <linux/slab.h>
fa487b2a 16#include <linux/module.h>
a25c8b2f
AN
17
18#include "tb.h"
19
e6b245cc
MW
20/* Switch NVM support */
21
e6b245cc 22#define NVM_CSS 0x10
e6b245cc
MW
23
24struct nvm_auth_status {
25 struct list_head list;
7c39ffe7 26 uuid_t uuid;
e6b245cc
MW
27 u32 status;
28};
29
fa487b2a
GF
30static bool clx_enabled = true;
31module_param_named(clx, clx_enabled, bool, 0444);
32MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
33
e6b245cc
MW
34/*
 35 * Hold NVM authentication failure status per switch. This information
36 * needs to stay around even when the switch gets power cycled so we
37 * keep it separately.
38 */
39static LIST_HEAD(nvm_auth_status_cache);
40static DEFINE_MUTEX(nvm_auth_status_lock);
41
42static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
43{
44 struct nvm_auth_status *st;
45
46 list_for_each_entry(st, &nvm_auth_status_cache, list) {
7c39ffe7 47 if (uuid_equal(&st->uuid, sw->uuid))
e6b245cc
MW
48 return st;
49 }
50
51 return NULL;
52}
53
54static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
55{
56 struct nvm_auth_status *st;
57
58 mutex_lock(&nvm_auth_status_lock);
59 st = __nvm_get_auth_status(sw);
60 mutex_unlock(&nvm_auth_status_lock);
61
62 *status = st ? st->status : 0;
63}
64
65static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
66{
67 struct nvm_auth_status *st;
68
69 if (WARN_ON(!sw->uuid))
70 return;
71
72 mutex_lock(&nvm_auth_status_lock);
73 st = __nvm_get_auth_status(sw);
74
75 if (!st) {
76 st = kzalloc(sizeof(*st), GFP_KERNEL);
77 if (!st)
78 goto unlock;
79
80 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
81 INIT_LIST_HEAD(&st->list);
82 list_add_tail(&st->list, &nvm_auth_status_cache);
83 }
84
85 st->status = status;
86unlock:
87 mutex_unlock(&nvm_auth_status_lock);
88}
89
90static void nvm_clear_auth_status(const struct tb_switch *sw)
91{
92 struct nvm_auth_status *st;
93
94 mutex_lock(&nvm_auth_status_lock);
95 st = __nvm_get_auth_status(sw);
96 if (st) {
97 list_del(&st->list);
98 kfree(st);
99 }
100 mutex_unlock(&nvm_auth_status_lock);
101}
102
103static int nvm_validate_and_write(struct tb_switch *sw)
104{
105 unsigned int image_size, hdr_size;
106 const u8 *buf = sw->nvm->buf;
107 u16 ds_size;
108 int ret;
109
110 if (!buf)
111 return -EINVAL;
112
113 image_size = sw->nvm->buf_data_size;
114 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
115 return -EINVAL;
116
117 /*
118 * FARB pointer must point inside the image and must at least
119 * contain parts of the digital section we will be reading here.
120 */
121 hdr_size = (*(u32 *)buf) & 0xffffff;
122 if (hdr_size + NVM_DEVID + 2 >= image_size)
123 return -EINVAL;
124
125 /* Digital section start should be aligned to 4k page */
126 if (!IS_ALIGNED(hdr_size, SZ_4K))
127 return -EINVAL;
128
129 /*
130 * Read digital section size and check that it also fits inside
131 * the image.
132 */
133 ds_size = *(u16 *)(buf + hdr_size);
134 if (ds_size >= image_size)
135 return -EINVAL;
136
137 if (!sw->safe_mode) {
138 u16 device_id;
139
140 /*
141 * Make sure the device ID in the image matches the one
142 * we read from the switch config space.
143 */
144 device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
145 if (device_id != sw->config.device_id)
146 return -EINVAL;
147
148 if (sw->generation < 3) {
149 /* Write CSS headers first */
150 ret = dma_port_flash_write(sw->dma_port,
151 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
152 DMA_PORT_CSS_MAX_SIZE);
153 if (ret)
154 return ret;
155 }
156
157 /* Skip headers in the image */
158 buf += hdr_size;
159 image_size -= hdr_size;
160 }
161
b0407983 162 if (tb_switch_is_usb4(sw))
4b794f80
ML
163 ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
164 else
165 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
166 if (!ret)
167 sw->nvm->flushed = true;
168 return ret;
e6b245cc
MW
169}
170
b0407983 171static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
e6b245cc 172{
7a7ebfa8 173 int ret = 0;
e6b245cc
MW
174
175 /*
176 * Root switch NVM upgrade requires that we disconnect the
d1ff7024 177 * existing paths first (in case it is not in safe mode
e6b245cc
MW
178 * already).
179 */
180 if (!sw->safe_mode) {
7a7ebfa8
MW
181 u32 status;
182
d1ff7024 183 ret = tb_domain_disconnect_all_paths(sw->tb);
e6b245cc
MW
184 if (ret)
185 return ret;
186 /*
187 * The host controller goes away pretty soon after this if
 188 * everything goes well so getting a timeout is expected.
189 */
190 ret = dma_port_flash_update_auth(sw->dma_port);
7a7ebfa8
MW
191 if (!ret || ret == -ETIMEDOUT)
192 return 0;
193
194 /*
195 * Any error from update auth operation requires power
196 * cycling of the host router.
197 */
198 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
199 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
200 nvm_set_auth_status(sw, status);
e6b245cc
MW
201 }
202
203 /*
204 * From safe mode we can get out by just power cycling the
205 * switch.
206 */
207 dma_port_power_cycle(sw->dma_port);
7a7ebfa8 208 return ret;
e6b245cc
MW
209}
210
b0407983 211static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
e6b245cc
MW
212{
213 int ret, retries = 10;
214
215 ret = dma_port_flash_update_auth(sw->dma_port);
7a7ebfa8
MW
216 switch (ret) {
217 case 0:
218 case -ETIMEDOUT:
219 case -EACCES:
220 case -EINVAL:
221 /* Power cycle is required */
222 break;
223 default:
e6b245cc 224 return ret;
7a7ebfa8 225 }
e6b245cc
MW
226
227 /*
228 * Poll here for the authentication status. It takes some time
229 * for the device to respond (we get timeout for a while). Once
 230 * we get a response the device needs to be power cycled in order
 231 * for the new NVM to be taken into use.
232 */
233 do {
234 u32 status;
235
236 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
237 if (ret < 0 && ret != -ETIMEDOUT)
238 return ret;
239 if (ret > 0) {
240 if (status) {
241 tb_sw_warn(sw, "failed to authenticate NVM\n");
242 nvm_set_auth_status(sw, status);
243 }
244
245 tb_sw_info(sw, "power cycling the switch now\n");
246 dma_port_power_cycle(sw->dma_port);
247 return 0;
248 }
249
250 msleep(500);
251 } while (--retries);
252
253 return -ETIMEDOUT;
254}
255
b0407983
MW
256static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
257{
258 struct pci_dev *root_port;
259
260 /*
 261 * During host router NVM upgrade we should not allow the root port
 262 * to go into D3cold because some root ports cannot trigger PME
 263 * themselves. To be on the safe side keep the root port in D0 during
264 * the whole upgrade process.
265 */
6ae72bfa 266 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
b0407983
MW
267 if (root_port)
268 pm_runtime_get_noresume(&root_port->dev);
269}
270
271static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
272{
273 struct pci_dev *root_port;
274
6ae72bfa 275 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
b0407983
MW
276 if (root_port)
277 pm_runtime_put(&root_port->dev);
278}
279
280static inline bool nvm_readable(struct tb_switch *sw)
281{
282 if (tb_switch_is_usb4(sw)) {
283 /*
284 * USB4 devices must support NVM operations but it is
285 * optional for hosts. Therefore we query the NVM sector
 286 * size here and if it is supported we assume NVM
287 * operations are implemented.
288 */
289 return usb4_switch_nvm_sector_size(sw) > 0;
290 }
291
292 /* Thunderbolt 2 and 3 devices support NVM through DMA port */
293 return !!sw->dma_port;
294}
295
296static inline bool nvm_upgradeable(struct tb_switch *sw)
297{
298 if (sw->no_nvm_upgrade)
299 return false;
300 return nvm_readable(sw);
301}
302
303static inline int nvm_read(struct tb_switch *sw, unsigned int address,
304 void *buf, size_t size)
305{
306 if (tb_switch_is_usb4(sw))
307 return usb4_switch_nvm_read(sw, address, buf, size);
308 return dma_port_flash_read(sw->dma_port, address, buf, size);
309}
310
1cbf680f 311static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
b0407983
MW
312{
313 int ret;
314
1cbf680f
MW
315 if (tb_switch_is_usb4(sw)) {
316 if (auth_only) {
317 ret = usb4_switch_nvm_set_offset(sw, 0);
318 if (ret)
319 return ret;
320 }
321 sw->nvm->authenticating = true;
b0407983 322 return usb4_switch_nvm_authenticate(sw);
1cbf680f
MW
323 } else if (auth_only) {
324 return -EOPNOTSUPP;
325 }
b0407983 326
1cbf680f 327 sw->nvm->authenticating = true;
b0407983
MW
328 if (!tb_route(sw)) {
329 nvm_authenticate_start_dma_port(sw);
330 ret = nvm_authenticate_host_dma_port(sw);
331 } else {
332 ret = nvm_authenticate_device_dma_port(sw);
333 }
334
335 return ret;
336}
337
e6b245cc
MW
338static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
339 size_t bytes)
340{
719a5fe8
MW
341 struct tb_nvm *nvm = priv;
342 struct tb_switch *sw = tb_to_switch(nvm->dev);
2d8ff0b5
MW
343 int ret;
344
345 pm_runtime_get_sync(&sw->dev);
4f7c2e0d
MW
346
347 if (!mutex_trylock(&sw->tb->lock)) {
348 ret = restart_syscall();
349 goto out;
350 }
351
b0407983 352 ret = nvm_read(sw, offset, val, bytes);
4f7c2e0d
MW
353 mutex_unlock(&sw->tb->lock);
354
355out:
2d8ff0b5
MW
356 pm_runtime_mark_last_busy(&sw->dev);
357 pm_runtime_put_autosuspend(&sw->dev);
e6b245cc 358
2d8ff0b5 359 return ret;
e6b245cc
MW
360}
361
362static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
363 size_t bytes)
364{
719a5fe8
MW
365 struct tb_nvm *nvm = priv;
366 struct tb_switch *sw = tb_to_switch(nvm->dev);
367 int ret;
e6b245cc 368
09f11b6c
MW
369 if (!mutex_trylock(&sw->tb->lock))
370 return restart_syscall();
e6b245cc
MW
371
372 /*
373 * Since writing the NVM image might require some special steps,
374 * for example when CSS headers are written, we cache the image
375 * locally here and handle the special cases when the user asks
376 * us to authenticate the image.
377 */
719a5fe8 378 ret = tb_nvm_write_buf(nvm, offset, val, bytes);
09f11b6c 379 mutex_unlock(&sw->tb->lock);
e6b245cc
MW
380
381 return ret;
382}
383
e6b245cc
MW
384static int tb_switch_nvm_add(struct tb_switch *sw)
385{
719a5fe8 386 struct tb_nvm *nvm;
e6b245cc
MW
387 u32 val;
388 int ret;
389
b0407983 390 if (!nvm_readable(sw))
e6b245cc
MW
391 return 0;
392
b0407983
MW
393 /*
394 * The NVM format of non-Intel hardware is not known so
395 * currently restrict NVM upgrade for Intel hardware. We may
396 * relax this in the future when we learn other NVM formats.
397 */
83d17036
MW
398 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
399 sw->config.vendor_id != 0x8087) {
b0407983
MW
400 dev_info(&sw->dev,
401 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
402 sw->config.vendor_id);
403 return 0;
404 }
405
719a5fe8
MW
406 nvm = tb_nvm_alloc(&sw->dev);
407 if (IS_ERR(nvm))
408 return PTR_ERR(nvm);
e6b245cc
MW
409
410 /*
411 * If the switch is in safe-mode the only accessible portion of
412 * the NVM is the non-active one where userspace is expected to
413 * write new functional NVM.
414 */
415 if (!sw->safe_mode) {
416 u32 nvm_size, hdr_size;
417
b0407983 418 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
e6b245cc 419 if (ret)
719a5fe8 420 goto err_nvm;
e6b245cc
MW
421
422 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
423 nvm_size = (SZ_1M << (val & 7)) / 8;
424 nvm_size = (nvm_size - hdr_size) / 2;
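 /*
 * Worked example (illustrative, not from the spec): a register value
 * of 3 gives (SZ_1M << 3) / 8 = 1 MiB of flash. With the 16k header
 * of generation 3+ hardware each of the two NVM images
 * (active/inactive) then spans (1024k - 16k) / 2 = 504k.
 */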
425
b0407983 426 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
e6b245cc 427 if (ret)
719a5fe8 428 goto err_nvm;
e6b245cc
MW
429
430 nvm->major = val >> 16;
431 nvm->minor = val >> 8;
432
719a5fe8
MW
433 ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
434 if (ret)
435 goto err_nvm;
e6b245cc
MW
436 }
437
3f415e5e 438 if (!sw->no_nvm_upgrade) {
719a5fe8
MW
439 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
440 tb_switch_nvm_write);
441 if (ret)
442 goto err_nvm;
e6b245cc 443 }
e6b245cc 444
e6b245cc 445 sw->nvm = nvm;
e6b245cc
MW
446 return 0;
447
719a5fe8
MW
448err_nvm:
449 tb_nvm_free(nvm);
e6b245cc
MW
450 return ret;
451}
452
453static void tb_switch_nvm_remove(struct tb_switch *sw)
454{
719a5fe8 455 struct tb_nvm *nvm;
e6b245cc 456
e6b245cc
MW
457 nvm = sw->nvm;
458 sw->nvm = NULL;
e6b245cc
MW
459
460 if (!nvm)
461 return;
462
463 /* Remove authentication status in case the switch is unplugged */
464 if (!nvm->authenticating)
465 nvm_clear_auth_status(sw);
466
719a5fe8 467 tb_nvm_free(nvm);
e6b245cc
MW
468}
469
a25c8b2f
AN
470/* port utility functions */
471
1c561e4e 472static const char *tb_port_type(const struct tb_regs_port_header *port)
a25c8b2f
AN
473{
474 switch (port->type >> 16) {
475 case 0:
476 switch ((u8) port->type) {
477 case 0:
478 return "Inactive";
479 case 1:
480 return "Port";
481 case 2:
482 return "NHI";
483 default:
484 return "unknown";
485 }
486 case 0x2:
487 return "Ethernet";
488 case 0x8:
489 return "SATA";
490 case 0xe:
491 return "DP/HDMI";
492 case 0x10:
493 return "PCIe";
494 case 0x20:
495 return "USB";
496 default:
497 return "unknown";
498 }
499}
500
56ad3aef 501static void tb_dump_port(struct tb *tb, const struct tb_port *port)
a25c8b2f 502{
56ad3aef
MW
503 const struct tb_regs_port_header *regs = &port->config;
504
daa5140f
MW
505 tb_dbg(tb,
506 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
56ad3aef
MW
507 regs->port_number, regs->vendor_id, regs->device_id,
508 regs->revision, regs->thunderbolt_version, tb_port_type(regs),
509 regs->type);
daa5140f 510 tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
56ad3aef
MW
511 regs->max_in_hop_id, regs->max_out_hop_id);
512 tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
513 tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
514 tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
515 port->ctl_credits);
a25c8b2f
AN
516}
517
9da672a4
AN
518/**
519 * tb_port_state() - get connectedness state of a port
5cc0df9c 520 * @port: the port to check
9da672a4
AN
521 *
522 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
523 *
524 * Return: Returns an enum tb_port_state on success or an error code on failure.
525 */
5cc0df9c 526int tb_port_state(struct tb_port *port)
9da672a4
AN
527{
528 struct tb_cap_phy phy;
529 int res;
530 if (port->cap_phy == 0) {
531 tb_port_WARN(port, "does not have a PHY\n");
532 return -EINVAL;
533 }
534 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
535 if (res)
536 return res;
537 return phy.state;
538}
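/*
 * Illustrative use (hypothetical caller, not part of this file):
 *
 *	int state = tb_port_state(port);
 *	if (state < 0)
 *		return state;
 *	if (state == TB_PORT_UP)
 *		tb_port_dbg(port, "link is up\n");
 */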
539
540/**
541 * tb_wait_for_port() - wait for a port to become ready
5c6b471b
MW
542 * @port: Port to wait
543 * @wait_if_unplugged: Wait also when port is unplugged
9da672a4
AN
544 *
545 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
546 * wait_if_unplugged is set then we also wait if the port is in state
547 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
548 * switch resume). Otherwise we only wait if a device is registered but the link
549 * has not yet been established.
550 *
551 * Return: Returns an error code on failure. Returns 0 if the port is not
552 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
553 * if the port is connected and in state TB_PORT_UP.
554 */
555int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
556{
557 int retries = 10;
558 int state;
559 if (!port->cap_phy) {
560 tb_port_WARN(port, "does not have PHY\n");
561 return -EINVAL;
562 }
563 if (tb_is_upstream_port(port)) {
564 tb_port_WARN(port, "is the upstream port\n");
565 return -EINVAL;
566 }
567
568 while (retries--) {
569 state = tb_port_state(port);
570 if (state < 0)
571 return state;
572 if (state == TB_PORT_DISABLED) {
62efe699 573 tb_port_dbg(port, "is disabled (state: 0)\n");
9da672a4
AN
574 return 0;
575 }
576 if (state == TB_PORT_UNPLUGGED) {
577 if (wait_if_unplugged) {
578 /* used during resume */
62efe699
MW
579 tb_port_dbg(port,
580 "is unplugged (state: 7), retrying...\n");
9da672a4
AN
581 msleep(100);
582 continue;
583 }
62efe699 584 tb_port_dbg(port, "is unplugged (state: 7)\n");
9da672a4
AN
585 return 0;
586 }
587 if (state == TB_PORT_UP) {
62efe699 588 tb_port_dbg(port, "is connected, link is up (state: 2)\n");
9da672a4
AN
589 return 1;
590 }
591
592 /*
593 * After plug-in the state is TB_PORT_CONNECTING. Give it some
594 * time.
595 */
62efe699
MW
596 tb_port_dbg(port,
597 "is connected, link is not up (state: %d), retrying...\n",
598 state);
9da672a4
AN
599 msleep(100);
600 }
601 tb_port_warn(port,
602 "failed to reach state TB_PORT_UP. Ignoring port...\n");
603 return 0;
604}
605
520b6702
AN
606/**
607 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
5c6b471b
MW
608 * @port: Port to add/remove NFC credits
609 * @credits: Credits to add/remove
520b6702
AN
610 *
611 * Change the number of NFC credits allocated to @port by @credits. To remove
612 * NFC credits pass a negative amount of credits.
613 *
614 * Return: Returns 0 on success or an error code on failure.
615 */
616int tb_port_add_nfc_credits(struct tb_port *port, int credits)
617{
c5ee6feb
MW
618 u32 nfc_credits;
619
620 if (credits == 0 || port->sw->is_unplugged)
520b6702 621 return 0;
c5ee6feb 622
edfbd68b
MW
623 /*
624 * USB4 restricts programming NFC buffers to lane adapters only
625 * so skip other ports.
626 */
627 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
628 return 0;
629
8f57d478 630 nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
6cb27a04
MW
631 if (credits < 0)
632 credits = max_t(int, -nfc_credits, credits);
633
c5ee6feb
MW
634 nfc_credits += credits;
635
8f57d478
MW
636 tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
637 port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
c5ee6feb 638
8f57d478 639 port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
c5ee6feb
MW
640 port->config.nfc_credits |= nfc_credits;
641
520b6702 642 return tb_port_write(port, &port->config.nfc_credits,
8f57d478 643 TB_CFG_PORT, ADP_CS_4, 1);
520b6702
AN
644}
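/*
 * Example (sketch, not taken from a real caller): temporarily grant a
 * port two extra NFC credits and return them afterwards:
 *
 *	ret = tb_port_add_nfc_credits(port, 2);
 *	...
 *	ret = tb_port_add_nfc_credits(port, -2);
 */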
645
646/**
647 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
5c6b471b
MW
648 * @port: Port whose counters to clear
649 * @counter: Counter index to clear
520b6702
AN
650 *
651 * Return: Returns 0 on success or an error code on failure.
652 */
653int tb_port_clear_counter(struct tb_port *port, int counter)
654{
655 u32 zero[3] = { 0, 0, 0 };
62efe699 656 tb_port_dbg(port, "clearing counter %d\n", counter);
520b6702
AN
657 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
658}
659
b0407983
MW
660/**
661 * tb_port_unlock() - Unlock downstream port
662 * @port: Port to unlock
663 *
664 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
665 * downstream router accessible for CM.
666 */
667int tb_port_unlock(struct tb_port *port)
668{
669 if (tb_switch_is_icm(port->sw))
670 return 0;
671 if (!tb_port_is_null(port))
672 return -EINVAL;
673 if (tb_switch_is_usb4(port->sw))
674 return usb4_port_unlock(port);
675 return 0;
676}
677
341d4518
MW
678static int __tb_port_enable(struct tb_port *port, bool enable)
679{
680 int ret;
681 u32 phy;
682
683 if (!tb_port_is_null(port))
684 return -EINVAL;
685
686 ret = tb_port_read(port, &phy, TB_CFG_PORT,
687 port->cap_phy + LANE_ADP_CS_1, 1);
688 if (ret)
689 return ret;
690
691 if (enable)
692 phy &= ~LANE_ADP_CS_1_LD;
693 else
694 phy |= LANE_ADP_CS_1_LD;
695
90f720d2
MW
696
697 ret = tb_port_write(port, &phy, TB_CFG_PORT,
698 port->cap_phy + LANE_ADP_CS_1, 1);
699 if (ret)
700 return ret;
701
702 tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
703 return 0;
341d4518
MW
704}
705
706/**
707 * tb_port_enable() - Enable lane adapter
708 * @port: Port to enable (can be %NULL)
709 *
 710 * This is used for lane 0 and 1 adapters to enable them.
711 */
712int tb_port_enable(struct tb_port *port)
713{
714 return __tb_port_enable(port, true);
715}
716
717/**
718 * tb_port_disable() - Disable lane adapter
719 * @port: Port to disable (can be %NULL)
720 *
 721 * This is used for lane 0 and 1 adapters to disable them.
722 */
723int tb_port_disable(struct tb_port *port)
724{
725 return __tb_port_enable(port, false);
726}
727
47ba5ae4 728/*
a25c8b2f
AN
729 * tb_init_port() - initialize a port
730 *
731 * This is a helper method for tb_switch_alloc. Does not check or initialize
732 * any downstream switches.
733 *
734 * Return: Returns 0 on success or an error code on failure.
735 */
343fcb8c 736static int tb_init_port(struct tb_port *port)
a25c8b2f
AN
737{
738 int res;
9da672a4 739 int cap;
343fcb8c 740
fb7a89ad
SM
741 INIT_LIST_HEAD(&port->list);
742
743 /* Control adapter does not have configuration space */
744 if (!port->port)
745 return 0;
746
a25c8b2f 747 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
d94dcbb1
MW
748 if (res) {
749 if (res == -ENODEV) {
750 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
751 port->port);
8824d19b 752 port->disabled = true;
d94dcbb1
MW
753 return 0;
754 }
a25c8b2f 755 return res;
d94dcbb1 756 }
a25c8b2f 757
9da672a4 758 /* Port 0 is the switch itself and has no PHY. */
fb7a89ad 759 if (port->config.type == TB_TYPE_PORT) {
da2da04b 760 cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
9da672a4
AN
761
762 if (cap > 0)
763 port->cap_phy = cap;
764 else
765 tb_port_WARN(port, "non switch port without a PHY\n");
b0407983
MW
766
767 cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
768 if (cap > 0)
769 port->cap_usb4 = cap;
56ad3aef
MW
770
771 /*
 772 * For USB4 ports the buffers allocated for the control path
 773 * can be read from the path config space. For legacy
 774 * devices we use a hard-coded value.
775 */
776 if (tb_switch_is_usb4(port->sw)) {
777 struct tb_regs_hop hop;
778
779 if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
780 port->ctl_credits = hop.initial_credits;
781 }
782 if (!port->ctl_credits)
783 port->ctl_credits = 2;
784
fb7a89ad 785 } else {
56183c88
MW
786 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
787 if (cap > 0)
788 port->cap_adap = cap;
9da672a4
AN
789 }
790
56ad3aef
MW
791 port->total_credits =
792 (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
793 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
794
795 tb_dump_port(port->sw->tb, port);
a25c8b2f 796 return 0;
a25c8b2f
AN
797}
798
0b2863ac
MW
799static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
800 int max_hopid)
801{
802 int port_max_hopid;
803 struct ida *ida;
804
805 if (in) {
806 port_max_hopid = port->config.max_in_hop_id;
807 ida = &port->in_hopids;
808 } else {
809 port_max_hopid = port->config.max_out_hop_id;
810 ida = &port->out_hopids;
811 }
812
12676423
MW
813 /*
 814 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7
 815 * are reserved.
816 */
a3cfebdc 817 if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
0b2863ac
MW
818 min_hopid = TB_PATH_MIN_HOPID;
819
820 if (max_hopid < 0 || max_hopid > port_max_hopid)
821 max_hopid = port_max_hopid;
822
823 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
824}
825
826/**
827 * tb_port_alloc_in_hopid() - Allocate input HopID from port
828 * @port: Port to allocate HopID for
829 * @min_hopid: Minimum acceptable input HopID
830 * @max_hopid: Maximum acceptable input HopID
831 *
832 * Return: HopID between @min_hopid and @max_hopid or negative errno in
833 * case of error.
834 */
835int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
836{
837 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
838}
839
840/**
841 * tb_port_alloc_out_hopid() - Allocate output HopID from port
842 * @port: Port to allocate HopID for
843 * @min_hopid: Minimum acceptable output HopID
844 * @max_hopid: Maximum acceptable output HopID
845 *
846 * Return: HopID between @min_hopid and @max_hopid or negative errno in
847 * case of error.
848 */
849int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
850{
851 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
852}
853
854/**
855 * tb_port_release_in_hopid() - Release allocated input HopID from port
856 * @port: Port whose HopID to release
857 * @hopid: HopID to release
858 */
859void tb_port_release_in_hopid(struct tb_port *port, int hopid)
860{
861 ida_simple_remove(&port->in_hopids, hopid);
862}
863
864/**
865 * tb_port_release_out_hopid() - Release allocated output HopID from port
866 * @port: Port whose HopID to release
867 * @hopid: HopID to release
868 */
869void tb_port_release_out_hopid(struct tb_port *port, int hopid)
870{
871 ida_simple_remove(&port->out_hopids, hopid);
872}
873
69eb79f7
MW
874static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
875 const struct tb_switch *sw)
876{
877 u64 mask = (1ULL << parent->config.depth * 8) - 1;
878 return (tb_route(parent) & mask) == (tb_route(sw) & mask);
879}
880
fb19fac1
MW
881/**
882 * tb_next_port_on_path() - Return next port for given port on a path
883 * @start: Start port of the walk
884 * @end: End port of the walk
885 * @prev: Previous port (%NULL if this is the first)
886 *
887 * This function can be used to walk from one port to another if they
 888 * are connected through zero or more switches. If @prev is a dual
 889 * link port, the function follows that link and returns the other end
 890 * on that same link.
891 *
892 * If the @end port has been reached, return %NULL.
893 *
894 * Domain tb->lock must be held when this function is called.
895 */
896struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
897 struct tb_port *prev)
898{
899 struct tb_port *next;
900
901 if (!prev)
902 return start;
903
904 if (prev->sw == end->sw) {
905 if (prev == end)
906 return NULL;
907 return end;
908 }
909
69eb79f7
MW
910 if (tb_switch_is_reachable(prev->sw, end->sw)) {
911 next = tb_port_at(tb_route(end->sw), prev->sw);
912 /* Walk down the topology if next == prev */
fb19fac1 913 if (prev->remote &&
69eb79f7 914 (next == prev || next->dual_link_port == prev))
fb19fac1 915 next = prev->remote;
fb19fac1
MW
916 } else {
917 if (tb_is_upstream_port(prev)) {
918 next = prev->remote;
919 } else {
920 next = tb_upstream_port(prev->sw);
921 /*
922 * Keep the same link if prev and next are both
923 * dual link ports.
924 */
925 if (next->dual_link_port &&
926 next->link_nr != prev->link_nr) {
927 next = next->dual_link_port;
928 }
929 }
930 }
931
69eb79f7 932 return next != prev ? next : NULL;
fb19fac1
MW
933}
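/*
 * Typical walk over every port on a path (sketch; tb->lock held):
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)))
 *		...;
 *
 * where @src and @dst are the two end ports of the path.
 */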
934
5b7b8c0a
MW
935/**
936 * tb_port_get_link_speed() - Get current link speed
937 * @port: Port to check (USB4 or CIO)
938 *
939 * Returns link speed in Gb/s or negative errno in case of failure.
940 */
941int tb_port_get_link_speed(struct tb_port *port)
91c0c120
MW
942{
943 u32 val, speed;
944 int ret;
945
946 if (!port->cap_phy)
947 return -EINVAL;
948
949 ret = tb_port_read(port, &val, TB_CFG_PORT,
950 port->cap_phy + LANE_ADP_CS_1, 1);
951 if (ret)
952 return ret;
953
954 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
955 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
956 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
957}
958
4210d50f
IH
959/**
960 * tb_port_get_link_width() - Get current link width
961 * @port: Port to check (USB4 or CIO)
962 *
963 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
964 * or negative errno in case of failure.
965 */
966int tb_port_get_link_width(struct tb_port *port)
91c0c120
MW
967{
968 u32 val;
969 int ret;
970
971 if (!port->cap_phy)
972 return -EINVAL;
973
974 ret = tb_port_read(port, &val, TB_CFG_PORT,
975 port->cap_phy + LANE_ADP_CS_1, 1);
976 if (ret)
977 return ret;
978
979 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
980 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
981}
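/*
 * Example (illustrative): a rough link bandwidth estimate can be built
 * from the two helpers above:
 *
 *	int speed = tb_port_get_link_speed(port);
 *	int width = tb_port_get_link_width(port);
 *
 *	if (speed > 0 && width > 0)
 *		estimated_gbps = speed * width;
 *
 * where estimated_gbps is a hypothetical local variable.
 */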
982
983static bool tb_port_is_width_supported(struct tb_port *port, int width)
984{
985 u32 phy, widths;
986 int ret;
987
988 if (!port->cap_phy)
989 return false;
990
991 ret = tb_port_read(port, &phy, TB_CFG_PORT,
992 port->cap_phy + LANE_ADP_CS_0, 1);
993 if (ret)
e9d0e751 994 return false;
91c0c120
MW
995
996 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
997 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
998
999 return !!(widths & width);
1000}
1001
0e14dd5e
MW
1002/**
1003 * tb_port_set_link_width() - Set target link width of the lane adapter
1004 * @port: Lane adapter
1005 * @width: Target link width (%1 or %2)
1006 *
1007 * Sets the target link width of the lane adapter to @width. Does not
1008 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
1009 *
1010 * Return: %0 in case of success and negative errno in case of error
1011 */
1012int tb_port_set_link_width(struct tb_port *port, unsigned int width)
91c0c120
MW
1013{
1014 u32 val;
1015 int ret;
1016
1017 if (!port->cap_phy)
1018 return -EINVAL;
1019
1020 ret = tb_port_read(port, &val, TB_CFG_PORT,
1021 port->cap_phy + LANE_ADP_CS_1, 1);
1022 if (ret)
1023 return ret;
1024
1025 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
1026 switch (width) {
1027 case 1:
1028 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
1029 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1030 break;
1031 case 2:
1032 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
1033 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1034 break;
1035 default:
1036 return -EINVAL;
1037 }
1038
91c0c120
MW
1039 return tb_port_write(port, &val, TB_CFG_PORT,
1040 port->cap_phy + LANE_ADP_CS_1, 1);
1041}
1042
0e14dd5e
MW
1043/**
1044 * tb_port_set_lane_bonding() - Enable/disable lane bonding
1045 * @port: Lane adapter
1046 * @bonding: enable/disable bonding
1047 *
1048 * Enables or disables lane bonding. This should be called after target
1049 * link width has been set (tb_port_set_link_width()). Note in most
1050 * cases one should use tb_port_lane_bonding_enable() instead to enable
1051 * lane bonding.
1052 *
1053 * As a side effect sets @port->bonding accordingly (and does the same
1054 * for lane 1 too).
1055 *
1056 * Return: %0 in case of success and negative errno in case of error
1057 */
1058int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
1059{
1060 u32 val;
1061 int ret;
1062
1063 if (!port->cap_phy)
1064 return -EINVAL;
1065
1066 ret = tb_port_read(port, &val, TB_CFG_PORT,
1067 port->cap_phy + LANE_ADP_CS_1, 1);
1068 if (ret)
1069 return ret;
1070
1071 if (bonding)
1072 val |= LANE_ADP_CS_1_LB;
1073 else
1074 val &= ~LANE_ADP_CS_1_LB;
1075
1076 ret = tb_port_write(port, &val, TB_CFG_PORT,
1077 port->cap_phy + LANE_ADP_CS_1, 1);
1078 if (ret)
1079 return ret;
1080
1081 /*
1082 * When lane 0 bonding is set it will affect lane 1 too so
1083 * update both.
1084 */
1085 port->bonded = bonding;
1086 port->dual_link_port->bonded = bonding;
1087
1088 return 0;
1089}
1090
5cc0df9c
IH
1091/**
1092 * tb_port_lane_bonding_enable() - Enable bonding on port
1093 * @port: port to enable
1094 *
e7051bea
MW
1095 * Enable bonding by setting the link width of the port and the other
1096 * port in case of dual link port. Does not wait for the link to
1097 * actually reach the bonded state so caller needs to call
1098 * tb_port_wait_for_link_width() before enabling any paths through the
1099 * link to make sure the link is in expected state.
5cc0df9c
IH
1100 *
1101 * Return: %0 in case of success and negative errno in case of error
1102 */
1103int tb_port_lane_bonding_enable(struct tb_port *port)
91c0c120
MW
1104{
1105 int ret;
1106
1107 /*
1108 * Enable lane bonding for both links if not already enabled by
1109 * for example the boot firmware.
1110 */
1111 ret = tb_port_get_link_width(port);
1112 if (ret == 1) {
1113 ret = tb_port_set_link_width(port, 2);
1114 if (ret)
0e14dd5e 1115 goto err_lane0;
91c0c120
MW
1116 }
1117
1118 ret = tb_port_get_link_width(port->dual_link_port);
1119 if (ret == 1) {
1120 ret = tb_port_set_link_width(port->dual_link_port, 2);
0e14dd5e
MW
1121 if (ret)
1122 goto err_lane0;
91c0c120
MW
1123 }
1124
0e14dd5e
MW
1125 ret = tb_port_set_lane_bonding(port, true);
1126 if (ret)
1127 goto err_lane1;
91c0c120
MW
1128
1129 return 0;
0e14dd5e
MW
1130
1131err_lane1:
1132 tb_port_set_link_width(port->dual_link_port, 1);
1133err_lane0:
1134 tb_port_set_link_width(port, 1);
1135 return ret;
91c0c120
MW
1136}
1137
5cc0df9c
IH
1138/**
1139 * tb_port_lane_bonding_disable() - Disable bonding on port
1140 * @port: port to disable
1141 *
1142 * Disable bonding by setting the link width of the port and the
1143 * other port in case of dual link port.
5cc0df9c
IH
1144 */
1145void tb_port_lane_bonding_disable(struct tb_port *port)
91c0c120 1146{
0e14dd5e 1147 tb_port_set_lane_bonding(port, false);
91c0c120
MW
1148 tb_port_set_link_width(port->dual_link_port, 1);
1149 tb_port_set_link_width(port, 1);
1150}
1151
e7051bea
MW
1152/**
1153 * tb_port_wait_for_link_width() - Wait until link reaches specific width
1154 * @port: Port to wait for
1155 * @width: Expected link width (%1 or %2)
1156 * @timeout_msec: Timeout in ms how long to wait
1157 *
1158 * Should be used after both ends of the link have been bonded (or
1159 * bonding has been disabled) to wait until the link actually reaches
1160 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
 1161 * within the given timeout, %0 if it was.
1162 */
1163int tb_port_wait_for_link_width(struct tb_port *port, int width,
1164 int timeout_msec)
1165{
1166 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1167 int ret;
1168
1169 do {
1170 ret = tb_port_get_link_width(port);
0a2e1667
MW
1171 if (ret < 0) {
1172 /*
1173 * Sometimes we get port locked error when
1174 * polling the lanes so we can ignore it and
1175 * retry.
1176 */
1177 if (ret != -EACCES)
1178 return ret;
1179 } else if (ret == width) {
e7051bea 1180 return 0;
0a2e1667 1181 }
e7051bea
MW
1182
1183 usleep_range(1000, 2000);
1184 } while (ktime_before(ktime_get(), timeout));
1185
1186 return -ETIMEDOUT;
1187}
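/*
 * Typical bonding sequence built from the helpers above (sketch, error
 * handling trimmed; the 100 ms timeout is only an example value):
 *
 *	ret = tb_port_lane_bonding_enable(up);
 *	if (!ret)
 *		ret = tb_port_wait_for_link_width(up, 2, 100);
 *	if (!ret)
 *		ret = tb_port_update_credits(up);
 */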
1188
69fea377
MW
1189static int tb_port_do_update_credits(struct tb_port *port)
1190{
1191 u32 nfc_credits;
1192 int ret;
1193
1194 ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
1195 if (ret)
1196 return ret;
1197
1198 if (nfc_credits != port->config.nfc_credits) {
1199 u32 total;
1200
1201 total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
1202 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
1203
1204 tb_port_dbg(port, "total credits changed %u -> %u\n",
1205 port->total_credits, total);
1206
1207 port->config.nfc_credits = nfc_credits;
1208 port->total_credits = total;
1209 }
1210
1211 return 0;
1212}
1213
1214/**
1215 * tb_port_update_credits() - Re-read port total credits
1216 * @port: Port to update
1217 *
1218 * After the link is bonded (or bonding was disabled) the port total
1219 * credits may change, so this function needs to be called to re-read
 1220 * the credits. Also updates the second lane adapter.
1221 */
1222int tb_port_update_credits(struct tb_port *port)
1223{
1224 int ret;
1225
1226 ret = tb_port_do_update_credits(port);
1227 if (ret)
1228 return ret;
1229 return tb_port_do_update_credits(port->dual_link_port);
1230}
1231
95f8f1cb
MW
1232static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
1233{
1234 u32 phy;
1235 int ret;
1236
1237 ret = tb_port_read(port, &phy, TB_CFG_PORT,
1238 port->cap_phy + LANE_ADP_CS_1, 1);
1239 if (ret)
1240 return ret;
1241
1242 if (secondary)
1243 phy |= LANE_ADP_CS_1_PMS;
1244 else
1245 phy &= ~LANE_ADP_CS_1_PMS;
1246
1247 return tb_port_write(port, &phy, TB_CFG_PORT,
1248 port->cap_phy + LANE_ADP_CS_1, 1);
1249}
1250
1251static int tb_port_pm_secondary_enable(struct tb_port *port)
1252{
1253 return __tb_port_pm_secondary_set(port, true);
1254}
1255
1256static int tb_port_pm_secondary_disable(struct tb_port *port)
1257{
1258 return __tb_port_pm_secondary_set(port, false);
1259}
1260
1261/* Called for USB4 or Titan Ridge routers only */
1262static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
1263{
1264 u32 mask, val;
1265 bool ret;
1266
1267 /* Don't enable CLx in case of two single-lane links */
1268 if (!port->bonded && port->dual_link_port)
1269 return false;
1270
1271 /* Don't enable CLx in case of inter-domain link */
1272 if (port->xdomain)
1273 return false;
1274
1275 if (tb_switch_is_usb4(port->sw)) {
1276 if (!usb4_port_clx_supported(port))
1277 return false;
1278 } else if (!tb_lc_is_clx_supported(port)) {
1279 return false;
1280 }
1281
1282 switch (clx) {
1283 case TB_CL1:
1284 /* CL0s and CL1 are enabled and supported together */
1285 mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
1286 break;
1287
1288 /* For now we support only CL0s and CL1. Not CL2 */
1289 case TB_CL2:
1290 default:
1291 return false;
1292 }
1293
1294 ret = tb_port_read(port, &val, TB_CFG_PORT,
1295 port->cap_phy + LANE_ADP_CS_0, 1);
1296 if (ret)
1297 return false;
1298
1299 return !!(val & mask);
1300}
1301
1302static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
1303{
1304 u32 phy, mask;
1305 int ret;
1306
1307 /* CL0s and CL1 are enabled and supported together */
1308 if (clx == TB_CL1)
1309 mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
1310 else
1311 /* For now we support only CL0s and CL1. Not CL2 */
1312 return -EOPNOTSUPP;
1313
1314 ret = tb_port_read(port, &phy, TB_CFG_PORT,
1315 port->cap_phy + LANE_ADP_CS_1, 1);
1316 if (ret)
1317 return ret;
1318
1319 if (enable)
1320 phy |= mask;
1321 else
1322 phy &= ~mask;
1323
1324 return tb_port_write(port, &phy, TB_CFG_PORT,
1325 port->cap_phy + LANE_ADP_CS_1, 1);
1326}
1327
1328static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
1329{
1330 return __tb_port_clx_set(port, clx, false);
1331}
1332
1333static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
1334{
1335 return __tb_port_clx_set(port, clx, true);
1336}
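/*
 * Example (sketch): CL0s/CL1 can be toggled on a lane adapter once
 * support has been checked with tb_port_clx_supported():
 *
 *	if (tb_port_clx_supported(port, TB_CL1))
 *		ret = tb_port_clx_enable(port, TB_CL1);
 *	...
 *	ret = tb_port_clx_disable(port, TB_CL1);
 */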
1337
fdb0887c
MW
1338static int tb_port_start_lane_initialization(struct tb_port *port)
1339{
1340 int ret;
1341
1342 if (tb_switch_is_usb4(port->sw))
1343 return 0;
1344
1345 ret = tb_lc_start_lane_initialization(port);
1346 return ret == -EINVAL ? 0 : ret;
1347}
1348
3fb10ea4
RM
1349/*
1350 * Returns true if the port had something (router, XDomain) connected
1351 * before suspend.
1352 */
1353static bool tb_port_resume(struct tb_port *port)
1354{
1355 bool has_remote = tb_port_has_remote(port);
1356
1357 if (port->usb4) {
1358 usb4_port_device_resume(port->usb4);
1359 } else if (!has_remote) {
1360 /*
1361 * For disconnected downstream lane adapters start lane
1362 * initialization now so we detect future connects.
1363 *
 1364 * For XDomain start the lane initialization now so the
1365 * link gets re-established.
1366 *
1367 * This is only needed for non-USB4 ports.
1368 */
1369 if (!tb_is_upstream_port(port) || port->xdomain)
1370 tb_port_start_lane_initialization(port);
1371 }
1372
1373 return has_remote || port->xdomain;
1374}
1375
e78db6f0
MW
1376/**
1377 * tb_port_is_enabled() - Is the adapter port enabled
1378 * @port: Port to check
1379 */
1380bool tb_port_is_enabled(struct tb_port *port)
1381{
1382 switch (port->config.type) {
1383 case TB_TYPE_PCIE_UP:
1384 case TB_TYPE_PCIE_DOWN:
1385 return tb_pci_port_is_enabled(port);
1386
4f807e47
MW
1387 case TB_TYPE_DP_HDMI_IN:
1388 case TB_TYPE_DP_HDMI_OUT:
1389 return tb_dp_port_is_enabled(port);
1390
e6f81858
RM
1391 case TB_TYPE_USB3_UP:
1392 case TB_TYPE_USB3_DOWN:
1393 return tb_usb3_port_is_enabled(port);
1394
e78db6f0
MW
1395 default:
1396 return false;
1397 }
1398}
1399
e6f81858
RM
1400/**
1401 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1402 * @port: USB3 adapter port to check
1403 */
1404bool tb_usb3_port_is_enabled(struct tb_port *port)
1405{
1406 u32 data;
1407
1408 if (tb_port_read(port, &data, TB_CFG_PORT,
1409 port->cap_adap + ADP_USB3_CS_0, 1))
1410 return false;
1411
1412 return !!(data & ADP_USB3_CS_0_PE);
1413}
1414
1415/**
1416 * tb_usb3_port_enable() - Enable USB3 adapter port
1417 * @port: USB3 adapter port to enable
1418 * @enable: Enable/disable the USB3 adapter
1419 */
1420int tb_usb3_port_enable(struct tb_port *port, bool enable)
1421{
1422 u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1423 : ADP_USB3_CS_0_V;
1424
1425 if (!port->cap_adap)
1426 return -ENXIO;
1427 return tb_port_write(port, &word, TB_CFG_PORT,
1428 port->cap_adap + ADP_USB3_CS_0, 1);
1429}
1430
0414bec5
MW
1431/**
1432 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1433 * @port: PCIe port to check
1434 */
1435bool tb_pci_port_is_enabled(struct tb_port *port)
1436{
1437 u32 data;
1438
778bfca3
MW
1439 if (tb_port_read(port, &data, TB_CFG_PORT,
1440 port->cap_adap + ADP_PCIE_CS_0, 1))
0414bec5
MW
1441 return false;
1442
778bfca3 1443 return !!(data & ADP_PCIE_CS_0_PE);
0414bec5
MW
1444}
1445
93f36ade
MW
1446/**
1447 * tb_pci_port_enable() - Enable PCIe adapter port
1448 * @port: PCIe port to enable
1449 * @enable: Enable/disable the PCIe adapter
1450 */
1451int tb_pci_port_enable(struct tb_port *port, bool enable)
1452{
778bfca3 1453 u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
93f36ade
MW
1454 if (!port->cap_adap)
1455 return -ENXIO;
778bfca3
MW
1456 return tb_port_write(port, &word, TB_CFG_PORT,
1457 port->cap_adap + ADP_PCIE_CS_0, 1);
93f36ade
MW
1458}
1459
4f807e47
MW
1460/**
1461 * tb_dp_port_hpd_is_active() - Is HPD already active
1462 * @port: DP out port to check
1463 *
1464 * Checks if the DP OUT adapter port has HDP bit already set.
1465 */
1466int tb_dp_port_hpd_is_active(struct tb_port *port)
1467{
1468 u32 data;
1469 int ret;
1470
98176380
MW
1471 ret = tb_port_read(port, &data, TB_CFG_PORT,
1472 port->cap_adap + ADP_DP_CS_2, 1);
4f807e47
MW
1473 if (ret)
1474 return ret;
1475
98176380 1476 return !!(data & ADP_DP_CS_2_HDP);
4f807e47
MW
1477}
1478
1479/**
1480 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1481 * @port: Port to clear HPD
1482 *
1483 * If the DP IN port has HDP set, this function can be used to clear it.
1484 */
1485int tb_dp_port_hpd_clear(struct tb_port *port)
1486{
1487 u32 data;
1488 int ret;
1489
98176380
MW
1490 ret = tb_port_read(port, &data, TB_CFG_PORT,
1491 port->cap_adap + ADP_DP_CS_3, 1);
4f807e47
MW
1492 if (ret)
1493 return ret;
1494
98176380
MW
1495 data |= ADP_DP_CS_3_HDPC;
1496 return tb_port_write(port, &data, TB_CFG_PORT,
1497 port->cap_adap + ADP_DP_CS_3, 1);
4f807e47
MW
1498}
1499
1500/**
1501 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1502 * @port: DP IN/OUT port to set hops
1503 * @video: Video Hop ID
1504 * @aux_tx: AUX TX Hop ID
1505 * @aux_rx: AUX RX Hop ID
1506 *
e5bb88e9
MW
1507 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
1508 * router DP adapters too but does not program the values as the fields
1509 * are read-only.
4f807e47
MW
1510 */
1511int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1512 unsigned int aux_tx, unsigned int aux_rx)
1513{
1514 u32 data[2];
1515 int ret;
1516
e5bb88e9
MW
1517 if (tb_switch_is_usb4(port->sw))
1518 return 0;
1519
98176380
MW
1520 ret = tb_port_read(port, data, TB_CFG_PORT,
1521 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1522 if (ret)
1523 return ret;
1524
98176380
MW
1525 data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
 1526 data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1527 data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
4f807e47 1528
98176380
MW
1529 data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1530 ADP_DP_CS_0_VIDEO_HOPID_MASK;
1531 data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1532 data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1533 ADP_DP_CS_1_AUX_RX_HOPID_MASK;
4f807e47 1534
98176380
MW
1535 return tb_port_write(port, data, TB_CFG_PORT,
1536 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1537}
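/*
 * Example sequence for bringing up a DP adapter with these helpers
 * (sketch only; the HopIDs are illustrative values):
 *
 *	ret = tb_dp_port_set_hops(port, 9, 8, 8);
 *	if (!ret)
 *		ret = tb_dp_port_enable(port, true);
 */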
1538
1539/**
1540 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1541 * @port: DP adapter port to check
1542 */
1543bool tb_dp_port_is_enabled(struct tb_port *port)
1544{
fd5c46b7 1545 u32 data[2];
4f807e47 1546
98176380 1547 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
fd5c46b7 1548 ARRAY_SIZE(data)))
4f807e47
MW
1549 return false;
1550
98176380 1551 return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
4f807e47
MW
1552}
1553
1554/**
1555 * tb_dp_port_enable() - Enables/disables DP paths of a port
1556 * @port: DP IN/OUT port
1557 * @enable: Enable/disable DP path
1558 *
1559 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1560 * calling this function.
1561 */
1562int tb_dp_port_enable(struct tb_port *port, bool enable)
1563{
fd5c46b7 1564 u32 data[2];
4f807e47
MW
1565 int ret;
1566
98176380
MW
1567 ret = tb_port_read(port, data, TB_CFG_PORT,
1568 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1569 if (ret)
1570 return ret;
1571
1572 if (enable)
98176380 1573 data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
4f807e47 1574 else
98176380 1575 data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
4f807e47 1576
98176380
MW
1577 return tb_port_write(port, data, TB_CFG_PORT,
1578 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
4f807e47
MW
1579}
1580
a25c8b2f
AN
1581/* switch utility functions */
1582
b0407983
MW
1583static const char *tb_switch_generation_name(const struct tb_switch *sw)
1584{
1585 switch (sw->generation) {
1586 case 1:
1587 return "Thunderbolt 1";
1588 case 2:
1589 return "Thunderbolt 2";
1590 case 3:
1591 return "Thunderbolt 3";
1592 case 4:
1593 return "USB4";
1594 default:
1595 return "Unknown";
1596 }
1597}
1598
1599static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
a25c8b2f 1600{
b0407983
MW
1601 const struct tb_regs_switch_header *regs = &sw->config;
1602
1603 tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1604 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1605 regs->revision, regs->thunderbolt_version);
1606 tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
daa5140f
MW
1607 tb_dbg(tb, " Config:\n");
1608 tb_dbg(tb,
a25c8b2f 1609 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
b0407983
MW
1610 regs->upstream_port_number, regs->depth,
1611 (((u64) regs->route_hi) << 32) | regs->route_lo,
1612 regs->enabled, regs->plug_events_delay);
daa5140f 1613 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
b0407983 1614 regs->__unknown1, regs->__unknown4);
a25c8b2f
AN
1615}
1616
23dd5bb4 1617/**
2c2a2327 1618 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
356b6c4e 1619 * @sw: Switch to reset
23dd5bb4
AN
1620 *
1621 * Return: Returns 0 on success or an error code on failure.
1622 */
356b6c4e 1623int tb_switch_reset(struct tb_switch *sw)
23dd5bb4
AN
1624{
1625 struct tb_cfg_result res;
356b6c4e
MW
1626
1627 if (sw->generation > 1)
1628 return 0;
1629
1630 tb_sw_dbg(sw, "resetting switch\n");
1631
1632 res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1633 TB_CFG_SWITCH, 2, 2);
23dd5bb4
AN
1634 if (res.err)
1635 return res.err;
bda83aec 1636 res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
23dd5bb4
AN
1637 if (res.err > 0)
1638 return -EIO;
1639 return res.err;
1640}
1641
1639664f
GF
1642/**
1643 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
1644 * @sw: Router to read the offset value from
1645 * @offset: Offset in the router config space to read from
1646 * @bit: Bit mask in the offset to wait for
1647 * @value: Value of the bits to wait for
1648 * @timeout_msec: Timeout in ms how long to wait
1649 *
1650 * Wait till the specified bits in specified offset reach specified value.
1651 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
1652 * within the given timeout or a negative errno in case of failure.
1653 */
1654int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
1655 u32 value, int timeout_msec)
1656{
1657 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1658
1659 do {
1660 u32 val;
1661 int ret;
1662
1663 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
1664 if (ret)
1665 return ret;
1666
1667 if ((val & bit) == value)
1668 return 0;
1669
1670 usleep_range(50, 100);
1671 } while (ktime_before(ktime_get(), timeout));
1672
1673 return -ETIMEDOUT;
1674}
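/*
 * Example (illustrative; the register/bit names are assumptions of the
 * caller): waiting up to 100 ms for an "operation in progress" style
 * bit to clear:
 *
 *	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 100);
 */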
1675
47ba5ae4 1676/*
ca389f71
AN
1677 * tb_plug_events_active() - enable/disable plug events on a switch
1678 *
1679 * Also configures a sane plug_events_delay of 255ms.
1680 *
1681 * Return: Returns 0 on success or an error code on failure.
1682 */
1683static int tb_plug_events_active(struct tb_switch *sw, bool active)
1684{
1685 u32 data;
1686 int res;
1687
5cb6ed31 1688 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
bfe778ac
MW
1689 return 0;
1690
ca389f71
AN
1691 sw->config.plug_events_delay = 0xff;
1692 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1693 if (res)
1694 return res;
1695
1696 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1697 if (res)
1698 return res;
1699
1700 if (active) {
1701 data = data & 0xFFFFFF83;
1702 switch (sw->config.device_id) {
1d111406
LW
1703 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1704 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1705 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
ca389f71
AN
1706 break;
1707 default:
30a4eca6
MW
1708 /*
1709 * Skip Alpine Ridge, it needs to have vendor
1710 * specific USB hotplug event enabled for the
1711 * internal xHCI to work.
1712 */
1713 if (!tb_switch_is_alpine_ridge(sw))
1714 data |= TB_PLUG_EVENTS_USB_DISABLE;
ca389f71
AN
1715 }
1716 } else {
1717 data = data | 0x7c;
1718 }
1719 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1720 sw->cap_plug_events + 1, 1);
1721}
1722
f67cf491
MW
1723static ssize_t authorized_show(struct device *dev,
1724 struct device_attribute *attr,
1725 char *buf)
1726{
1727 struct tb_switch *sw = tb_to_switch(dev);
1728
1729 return sprintf(buf, "%u\n", sw->authorized);
1730}
1731
3da88be2
MW
1732static int disapprove_switch(struct device *dev, void *not_used)
1733{
1651d9e7 1734 char *envp[] = { "AUTHORIZED=0", NULL };
3da88be2
MW
1735 struct tb_switch *sw;
1736
1737 sw = tb_to_switch(dev);
1738 if (sw && sw->authorized) {
1739 int ret;
1740
1741 /* First children */
1742 ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1743 if (ret)
1744 return ret;
1745
1746 ret = tb_domain_disapprove_switch(sw->tb, sw);
1747 if (ret)
1748 return ret;
1749
1750 sw->authorized = 0;
1651d9e7 1751 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
3da88be2
MW
1752 }
1753
1754 return 0;
1755}
1756
f67cf491
MW
1757static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1758{
1651d9e7 1759 char envp_string[13];
f67cf491 1760 int ret = -EINVAL;
1651d9e7 1761 char *envp[] = { envp_string, NULL };
f67cf491 1762
09f11b6c
MW
1763 if (!mutex_trylock(&sw->tb->lock))
1764 return restart_syscall();
f67cf491 1765
3da88be2 1766 if (!!sw->authorized == !!val)
f67cf491
MW
1767 goto unlock;
1768
1769 switch (val) {
3da88be2
MW
1770 /* Disapprove switch */
1771 case 0:
1772 if (tb_route(sw)) {
1773 ret = disapprove_switch(&sw->dev, NULL);
1774 goto unlock;
1775 }
1776 break;
1777
f67cf491
MW
1778 /* Approve switch */
1779 case 1:
1780 if (sw->key)
1781 ret = tb_domain_approve_switch_key(sw->tb, sw);
1782 else
1783 ret = tb_domain_approve_switch(sw->tb, sw);
1784 break;
1785
1786 /* Challenge switch */
1787 case 2:
1788 if (sw->key)
1789 ret = tb_domain_challenge_switch_key(sw->tb, sw);
1790 break;
1791
1792 default:
1793 break;
1794 }
1795
1796 if (!ret) {
1797 sw->authorized = val;
1651d9e7
RJ
1798 /*
1799 * Notify status change to the userspace, informing the new
1800 * value of /sys/bus/thunderbolt/devices/.../authorized.
1801 */
1802 sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
1803 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
f67cf491
MW
1804 }
1805
1806unlock:
09f11b6c 1807 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1808 return ret;
1809}
1810
1811static ssize_t authorized_store(struct device *dev,
1812 struct device_attribute *attr,
1813 const char *buf, size_t count)
1814{
1815 struct tb_switch *sw = tb_to_switch(dev);
1816 unsigned int val;
1817 ssize_t ret;
1818
1819 ret = kstrtouint(buf, 0, &val);
1820 if (ret)
1821 return ret;
1822 if (val > 2)
1823 return -EINVAL;
1824
4f7c2e0d 1825 pm_runtime_get_sync(&sw->dev);
f67cf491 1826 ret = tb_switch_set_authorized(sw, val);
4f7c2e0d
MW
1827 pm_runtime_mark_last_busy(&sw->dev);
1828 pm_runtime_put_autosuspend(&sw->dev);
f67cf491
MW
1829
1830 return ret ? ret : count;
1831}
1832static DEVICE_ATTR_RW(authorized);
1833
14862ee3
YB
1834static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1835 char *buf)
1836{
1837 struct tb_switch *sw = tb_to_switch(dev);
1838
1839 return sprintf(buf, "%u\n", sw->boot);
1840}
1841static DEVICE_ATTR_RO(boot);
1842
bfe778ac
MW
1843static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1844 char *buf)
1845{
1846 struct tb_switch *sw = tb_to_switch(dev);
ca389f71 1847
bfe778ac
MW
1848 return sprintf(buf, "%#x\n", sw->device);
1849}
1850static DEVICE_ATTR_RO(device);
1851
72ee3390
MW
1852static ssize_t
1853device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1854{
1855 struct tb_switch *sw = tb_to_switch(dev);
1856
1857 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1858}
1859static DEVICE_ATTR_RO(device_name);
1860
b406357c
CK
1861static ssize_t
1862generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1863{
1864 struct tb_switch *sw = tb_to_switch(dev);
1865
1866 return sprintf(buf, "%u\n", sw->generation);
1867}
1868static DEVICE_ATTR_RO(generation);
1869
f67cf491
MW
1870static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1871 char *buf)
1872{
1873 struct tb_switch *sw = tb_to_switch(dev);
1874 ssize_t ret;
1875
09f11b6c
MW
1876 if (!mutex_trylock(&sw->tb->lock))
1877 return restart_syscall();
f67cf491
MW
1878
1879 if (sw->key)
1880 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1881 else
1882 ret = sprintf(buf, "\n");
1883
09f11b6c 1884 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1885 return ret;
1886}
1887
1888static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1889 const char *buf, size_t count)
1890{
1891 struct tb_switch *sw = tb_to_switch(dev);
1892 u8 key[TB_SWITCH_KEY_SIZE];
1893 ssize_t ret = count;
e545f0d8 1894 bool clear = false;
f67cf491 1895
e545f0d8
BY
1896 if (!strcmp(buf, "\n"))
1897 clear = true;
1898 else if (hex2bin(key, buf, sizeof(key)))
f67cf491
MW
1899 return -EINVAL;
1900
09f11b6c
MW
1901 if (!mutex_trylock(&sw->tb->lock))
1902 return restart_syscall();
f67cf491
MW
1903
1904 if (sw->authorized) {
1905 ret = -EBUSY;
1906 } else {
1907 kfree(sw->key);
e545f0d8
BY
1908 if (clear) {
1909 sw->key = NULL;
1910 } else {
1911 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1912 if (!sw->key)
1913 ret = -ENOMEM;
1914 }
f67cf491
MW
1915 }
1916
09f11b6c 1917 mutex_unlock(&sw->tb->lock);
f67cf491
MW
1918 return ret;
1919}
0956e411 1920static DEVICE_ATTR(key, 0600, key_show, key_store);
f67cf491 1921
91c0c120
MW
1922static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1923 char *buf)
1924{
1925 struct tb_switch *sw = tb_to_switch(dev);
1926
1927 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1928}
1929
1930/*
1931 * Currently all lanes must run at the same speed but we expose here
1932 * both directions to allow possible asymmetric links in the future.
1933 */
1934static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1935static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1936
1937static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1938 char *buf)
1939{
1940 struct tb_switch *sw = tb_to_switch(dev);
1941
1942 return sprintf(buf, "%u\n", sw->link_width);
1943}
1944
1945/*
1946 * Currently link has same amount of lanes both directions (1 or 2) but
1947 * expose them separately to allow possible asymmetric links in the future.
1948 */
1949static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1950static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1951
e6b245cc
MW
1952static ssize_t nvm_authenticate_show(struct device *dev,
1953 struct device_attribute *attr, char *buf)
1954{
1955 struct tb_switch *sw = tb_to_switch(dev);
1956 u32 status;
1957
1958 nvm_get_auth_status(sw, &status);
1959 return sprintf(buf, "%#x\n", status);
1960}
1961
1cb36293
ML
1962static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1963 bool disconnect)
e6b245cc
MW
1964{
1965 struct tb_switch *sw = tb_to_switch(dev);
1cbf680f 1966 int val, ret;
e6b245cc 1967
4f7c2e0d
MW
1968 pm_runtime_get_sync(&sw->dev);
1969
1970 if (!mutex_trylock(&sw->tb->lock)) {
1971 ret = restart_syscall();
1972 goto exit_rpm;
1973 }
e6b245cc
MW
1974
1975 /* If NVMem devices are not yet added */
1976 if (!sw->nvm) {
1977 ret = -EAGAIN;
1978 goto exit_unlock;
1979 }
1980
4b794f80 1981 ret = kstrtoint(buf, 10, &val);
e6b245cc
MW
1982 if (ret)
1983 goto exit_unlock;
1984
1985 /* Always clear the authentication status */
1986 nvm_clear_auth_status(sw);
1987
4b794f80 1988 if (val > 0) {
1cbf680f
MW
1989 if (val == AUTHENTICATE_ONLY) {
1990 if (disconnect)
4b794f80 1991 ret = -EINVAL;
1cbf680f
MW
1992 else
1993 ret = nvm_authenticate(sw, true);
1994 } else {
1995 if (!sw->nvm->flushed) {
1996 if (!sw->nvm->buf) {
1997 ret = -EINVAL;
1998 goto exit_unlock;
1999 }
2000
2001 ret = nvm_validate_and_write(sw);
2002 if (ret || val == WRITE_ONLY)
2003 goto exit_unlock;
4b794f80 2004 }
1cbf680f
MW
2005 if (val == WRITE_AND_AUTHENTICATE) {
2006 if (disconnect)
2007 ret = tb_lc_force_power(sw);
2008 else
2009 ret = nvm_authenticate(sw, false);
1cb36293 2010 }
4b794f80 2011 }
e6b245cc
MW
2012 }
2013
2014exit_unlock:
09f11b6c 2015 mutex_unlock(&sw->tb->lock);
4f7c2e0d
MW
2016exit_rpm:
2017 pm_runtime_mark_last_busy(&sw->dev);
2018 pm_runtime_put_autosuspend(&sw->dev);
e6b245cc 2019
1cb36293
ML
2020 return ret;
2021}
2022
2023static ssize_t nvm_authenticate_store(struct device *dev,
2024 struct device_attribute *attr, const char *buf, size_t count)
2025{
2026 int ret = nvm_authenticate_sysfs(dev, buf, false);
e6b245cc
MW
2027 if (ret)
2028 return ret;
2029 return count;
2030}
2031static DEVICE_ATTR_RW(nvm_authenticate);
2032
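/*
 * Illustrative note (not part of the original file): based on the logic in
 * nvm_authenticate_sysfs() above, a positive value written to this
 * attribute selects the flow: AUTHENTICATE_ONLY starts authentication
 * without flashing, WRITE_ONLY only flushes the buffered image to the
 * NVM, and WRITE_AND_AUTHENTICATE does both. Any successfully parsed
 * write first clears the cached authentication status.
 */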
1cb36293
ML
2033static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
2034 struct device_attribute *attr, char *buf)
2035{
2036 return nvm_authenticate_show(dev, attr, buf);
2037}
2038
2039static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
2040 struct device_attribute *attr, const char *buf, size_t count)
2041{
2042 int ret;
2043
2044 ret = nvm_authenticate_sysfs(dev, buf, true);
2045 return ret ? ret : count;
2046}
2047static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
2048
e6b245cc
MW
2049static ssize_t nvm_version_show(struct device *dev,
2050 struct device_attribute *attr, char *buf)
2051{
2052 struct tb_switch *sw = tb_to_switch(dev);
2053 int ret;
2054
09f11b6c
MW
2055 if (!mutex_trylock(&sw->tb->lock))
2056 return restart_syscall();
e6b245cc
MW
2057
2058 if (sw->safe_mode)
2059 ret = -ENODATA;
2060 else if (!sw->nvm)
2061 ret = -EAGAIN;
2062 else
2063 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
2064
09f11b6c 2065 mutex_unlock(&sw->tb->lock);
e6b245cc
MW
2066
2067 return ret;
2068}
2069static DEVICE_ATTR_RO(nvm_version);
2070
bfe778ac
MW
2071static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
2072 char *buf)
a25c8b2f 2073{
bfe778ac 2074 struct tb_switch *sw = tb_to_switch(dev);
a25c8b2f 2075
bfe778ac
MW
2076 return sprintf(buf, "%#x\n", sw->vendor);
2077}
2078static DEVICE_ATTR_RO(vendor);
2079
72ee3390
MW
2080static ssize_t
2081vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
2082{
2083 struct tb_switch *sw = tb_to_switch(dev);
2084
2085 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
2086}
2087static DEVICE_ATTR_RO(vendor_name);
2088
bfe778ac
MW
2089static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
2090 char *buf)
2091{
2092 struct tb_switch *sw = tb_to_switch(dev);
2093
2094 return sprintf(buf, "%pUb\n", sw->uuid);
2095}
2096static DEVICE_ATTR_RO(unique_id);
2097
2098static struct attribute *switch_attrs[] = {
f67cf491 2099 &dev_attr_authorized.attr,
14862ee3 2100 &dev_attr_boot.attr,
bfe778ac 2101 &dev_attr_device.attr,
72ee3390 2102 &dev_attr_device_name.attr,
b406357c 2103 &dev_attr_generation.attr,
f67cf491 2104 &dev_attr_key.attr,
e6b245cc 2105 &dev_attr_nvm_authenticate.attr,
1cb36293 2106 &dev_attr_nvm_authenticate_on_disconnect.attr,
e6b245cc 2107 &dev_attr_nvm_version.attr,
91c0c120
MW
2108 &dev_attr_rx_speed.attr,
2109 &dev_attr_rx_lanes.attr,
2110 &dev_attr_tx_speed.attr,
2111 &dev_attr_tx_lanes.attr,
bfe778ac 2112 &dev_attr_vendor.attr,
72ee3390 2113 &dev_attr_vendor_name.attr,
bfe778ac
MW
2114 &dev_attr_unique_id.attr,
2115 NULL,
2116};
2117
f67cf491
MW
2118static umode_t switch_attr_is_visible(struct kobject *kobj,
2119 struct attribute *attr, int n)
2120{
fff15f23 2121 struct device *dev = kobj_to_dev(kobj);
f67cf491
MW
2122 struct tb_switch *sw = tb_to_switch(dev);
2123
3cd542e6
MW
2124 if (attr == &dev_attr_authorized.attr) {
2125 if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
8e334125 2126 sw->tb->security_level == TB_SECURITY_DPONLY)
3cd542e6
MW
2127 return 0;
2128 } else if (attr == &dev_attr_device.attr) {
58f414fa
MW
2129 if (!sw->device)
2130 return 0;
2131 } else if (attr == &dev_attr_device_name.attr) {
2132 if (!sw->device_name)
2133 return 0;
2134 } else if (attr == &dev_attr_vendor.attr) {
2135 if (!sw->vendor)
2136 return 0;
2137 } else if (attr == &dev_attr_vendor_name.attr) {
2138 if (!sw->vendor_name)
2139 return 0;
2140 } else if (attr == &dev_attr_key.attr) {
f67cf491
MW
2141 if (tb_route(sw) &&
2142 sw->tb->security_level == TB_SECURITY_SECURE &&
2143 sw->security_level == TB_SECURITY_SECURE)
2144 return attr->mode;
2145 return 0;
91c0c120
MW
2146 } else if (attr == &dev_attr_rx_speed.attr ||
2147 attr == &dev_attr_rx_lanes.attr ||
2148 attr == &dev_attr_tx_speed.attr ||
2149 attr == &dev_attr_tx_lanes.attr) {
2150 if (tb_route(sw))
2151 return attr->mode;
2152 return 0;
3f415e5e 2153 } else if (attr == &dev_attr_nvm_authenticate.attr) {
b0407983 2154 if (nvm_upgradeable(sw))
3f415e5e
MW
2155 return attr->mode;
2156 return 0;
2157 } else if (attr == &dev_attr_nvm_version.attr) {
b0407983 2158 if (nvm_readable(sw))
e6b245cc
MW
2159 return attr->mode;
2160 return 0;
14862ee3
YB
2161 } else if (attr == &dev_attr_boot.attr) {
2162 if (tb_route(sw))
2163 return attr->mode;
2164 return 0;
1cb36293
ML
2165 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
2166 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
2167 return attr->mode;
2168 return 0;
f67cf491
MW
2169 }
2170
e6b245cc 2171 return sw->safe_mode ? 0 : attr->mode;
f67cf491
MW
2172}
2173
6889e00f 2174static const struct attribute_group switch_group = {
f67cf491 2175 .is_visible = switch_attr_is_visible,
bfe778ac
MW
2176 .attrs = switch_attrs,
2177};
ca389f71 2178
bfe778ac
MW
2179static const struct attribute_group *switch_groups[] = {
2180 &switch_group,
2181 NULL,
2182};
2183
2184static void tb_switch_release(struct device *dev)
2185{
2186 struct tb_switch *sw = tb_to_switch(dev);
b433d010 2187 struct tb_port *port;
bfe778ac 2188
3e136768
MW
2189 dma_port_free(sw->dma_port);
2190
b433d010 2191 tb_switch_for_each_port(sw, port) {
781e14ea
MW
2192 ida_destroy(&port->in_hopids);
2193 ida_destroy(&port->out_hopids);
0b2863ac
MW
2194 }
2195
bfe778ac 2196 kfree(sw->uuid);
72ee3390
MW
2197 kfree(sw->device_name);
2198 kfree(sw->vendor_name);
a25c8b2f 2199 kfree(sw->ports);
343fcb8c 2200 kfree(sw->drom);
f67cf491 2201 kfree(sw->key);
a25c8b2f
AN
2202 kfree(sw);
2203}
2204
2f608ba1
MW
2205static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
2206{
2207 struct tb_switch *sw = tb_to_switch(dev);
2208 const char *type;
2209
2210 if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
2211 if (add_uevent_var(env, "USB4_VERSION=1.0"))
2212 return -ENOMEM;
2213 }
2214
2215 if (!tb_route(sw)) {
2216 type = "host";
2217 } else {
2218 const struct tb_port *port;
2219 bool hub = false;
2220
2221 /* Device is a hub if it has any downstream ports */
2222 tb_switch_for_each_port(sw, port) {
2223 if (!port->disabled && !tb_is_upstream_port(port) &&
2224 tb_port_is_null(port)) {
2225 hub = true;
2226 break;
2227 }
2228 }
2229
2230 type = hub ? "hub" : "device";
2231 }
2232
2233 if (add_uevent_var(env, "USB4_TYPE=%s", type))
2234 return -ENOMEM;
2235 return 0;
2236}
2237
2d8ff0b5
MW
2238/*
2239 * Currently we only need to provide the callbacks. Everything else is handled
2240 * in the connection manager.
2241 */
2242static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
2243{
4f7c2e0d
MW
2244 struct tb_switch *sw = tb_to_switch(dev);
2245 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2246
2247 if (cm_ops->runtime_suspend_switch)
2248 return cm_ops->runtime_suspend_switch(sw);
2249
2d8ff0b5
MW
2250 return 0;
2251}
2252
2253static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
2254{
4f7c2e0d
MW
2255 struct tb_switch *sw = tb_to_switch(dev);
2256 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2257
2258 if (cm_ops->runtime_resume_switch)
2259 return cm_ops->runtime_resume_switch(sw);
2d8ff0b5
MW
2260 return 0;
2261}
2262
2263static const struct dev_pm_ops tb_switch_pm_ops = {
2264 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
2265 NULL)
2266};
2267
bfe778ac
MW
2268struct device_type tb_switch_type = {
2269 .name = "thunderbolt_device",
2270 .release = tb_switch_release,
2f608ba1 2271 .uevent = tb_switch_uevent,
2d8ff0b5 2272 .pm = &tb_switch_pm_ops,
bfe778ac
MW
2273};
2274
2c3c4197
MW
2275static int tb_switch_get_generation(struct tb_switch *sw)
2276{
2277 switch (sw->config.device_id) {
2278 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2279 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2280 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2281 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2282 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2283 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2284 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2285 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2286 return 1;
2287
2288 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2289 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2290 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2291 return 2;
2292
2293 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2294 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2295 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2296 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2297 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
4bac471d
RM
2298 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2299 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2300 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
3cdb9446
MW
2301 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2302 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2c3c4197
MW
2303 return 3;
2304
2305 default:
b0407983
MW
2306 if (tb_switch_is_usb4(sw))
2307 return 4;
2308
2c3c4197
MW
2309 /*
2310 * For unknown switches assume generation to be 1 to be
2311 * on the safe side.
2312 */
2313 tb_sw_warn(sw, "unsupported switch device id %#x\n",
2314 sw->config.device_id);
2315 return 1;
2316 }
2317}
2318
b0407983
MW
2319static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2320{
2321 int max_depth;
2322
2323 if (tb_switch_is_usb4(sw) ||
2324 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2325 max_depth = USB4_SWITCH_MAX_DEPTH;
2326 else
2327 max_depth = TB_SWITCH_MAX_DEPTH;
2328
2329 return depth > max_depth;
2330}
2331
a25c8b2f 2332/**
bfe778ac
MW
2333 * tb_switch_alloc() - allocate a switch
2334 * @tb: Pointer to the owning domain
2335 * @parent: Parent device for this switch
2336 * @route: Route string for this switch
a25c8b2f 2337 *
bfe778ac
MW
2338 * Allocates and initializes a switch. Will not upload configuration to
2339 * the switch. For that you need to call tb_switch_configure()
2340 * separately. The returned switch should be released by calling
2341 * tb_switch_put().
2342 *
444ac384
MW
2343 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2344 * failure.
a25c8b2f 2345 */
bfe778ac
MW
2346struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2347 u64 route)
a25c8b2f 2348{
a25c8b2f 2349 struct tb_switch *sw;
f0342e75 2350 int upstream_port;
444ac384 2351 int i, ret, depth;
f0342e75 2352
b0407983
MW
2353 /* Unlock the downstream port so we can access the switch below */
2354 if (route) {
2355 struct tb_switch *parent_sw = tb_to_switch(parent);
2356 struct tb_port *down;
2357
2358 down = tb_port_at(route, parent_sw);
2359 tb_port_unlock(down);
2360 }
2361
f0342e75 2362 depth = tb_route_length(route);
f0342e75
MW
2363
2364 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
a25c8b2f 2365 if (upstream_port < 0)
444ac384 2366 return ERR_PTR(upstream_port);
a25c8b2f
AN
2367
2368 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2369 if (!sw)
444ac384 2370 return ERR_PTR(-ENOMEM);
a25c8b2f
AN
2371
2372 sw->tb = tb;
444ac384
MW
2373 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2374 if (ret)
bfe778ac
MW
2375 goto err_free_sw_ports;
2376
b0407983
MW
2377 sw->generation = tb_switch_get_generation(sw);
2378
daa5140f 2379 tb_dbg(tb, "current switch config:\n");
b0407983 2380 tb_dump_switch(tb, sw);
a25c8b2f
AN
2381
2382 /* configure switch */
2383 sw->config.upstream_port_number = upstream_port;
f0342e75
MW
2384 sw->config.depth = depth;
2385 sw->config.route_hi = upper_32_bits(route);
2386 sw->config.route_lo = lower_32_bits(route);
bfe778ac 2387 sw->config.enabled = 0;
a25c8b2f 2388
b0407983 2389 /* Make sure we do not exceed maximum topology limit */
704a940d
CIK
2390 if (tb_switch_exceeds_max_depth(sw, depth)) {
2391 ret = -EADDRNOTAVAIL;
2392 goto err_free_sw_ports;
2393 }
b0407983 2394
a25c8b2f
AN
2395 /* initialize ports */
2396 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
343fcb8c 2397 GFP_KERNEL);
444ac384
MW
2398 if (!sw->ports) {
2399 ret = -ENOMEM;
bfe778ac 2400 goto err_free_sw_ports;
444ac384 2401 }
a25c8b2f
AN
2402
2403 for (i = 0; i <= sw->config.max_port_number; i++) {
343fcb8c
AN
2404 /* minimum setup for tb_find_cap and tb_drom_read to work */
2405 sw->ports[i].sw = sw;
2406 sw->ports[i].port = i;
781e14ea
MW
2407
2408 /* Control port does not need HopID allocation */
2409 if (i) {
2410 ida_init(&sw->ports[i].in_hopids);
2411 ida_init(&sw->ports[i].out_hopids);
2412 }
a25c8b2f
AN
2413 }
2414
444ac384 2415 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
b0407983
MW
2416 if (ret > 0)
2417 sw->cap_plug_events = ret;
ca389f71 2418
23ccd21c
GF
2419 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2420 if (ret > 0)
2421 sw->cap_vsec_tmu = ret;
2422
444ac384
MW
2423 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2424 if (ret > 0)
2425 sw->cap_lc = ret;
a9be5582 2426
43f977bc
GF
2427 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
2428 if (ret > 0)
2429 sw->cap_lp = ret;
2430
f67cf491
MW
2431 /* Root switch is always authorized */
2432 if (!route)
2433 sw->authorized = true;
2434
bfe778ac
MW
2435 device_initialize(&sw->dev);
2436 sw->dev.parent = parent;
2437 sw->dev.bus = &tb_bus_type;
2438 sw->dev.type = &tb_switch_type;
2439 sw->dev.groups = switch_groups;
2440 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2441
2442 return sw;
2443
2444err_free_sw_ports:
2445 kfree(sw->ports);
2446 kfree(sw);
2447
444ac384 2448 return ERR_PTR(ret);
bfe778ac
MW
2449}
2450
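/*
 * Illustrative sketch (not part of the original file): how a connection
 * manager might use tb_switch_alloc() together with tb_switch_configure()
 * and tb_switch_add(), assuming the declarations in tb.h. The function
 * name example_enumerate_switch() is hypothetical.
 */
static int __maybe_unused example_enumerate_switch(struct tb *tb,
						   struct device *parent,
						   u64 route)
{
	struct tb_switch *sw;
	int ret;

	sw = tb_switch_alloc(tb, parent, route);
	if (IS_ERR(sw))
		return PTR_ERR(sw);

	/* Upload the configuration before exposing the switch to userspace */
	ret = tb_switch_configure(sw);
	if (!ret)
		ret = tb_switch_add(sw);
	if (ret)
		tb_switch_put(sw);

	return ret;
}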
e6b245cc
MW
2451/**
2452 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2453 * @tb: Pointer to the owning domain
2454 * @parent: Parent device for this switch
2455 * @route: Route string for this switch
2456 *
2457 * This creates a switch in safe mode, which means the switch lacks all
2458 * capabilities except the DMA configuration port until it is flashed
2459 * with valid NVM firmware.
2460 *
2461 * The returned switch must be released by calling tb_switch_put().
2462 *
444ac384 2463 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
e6b245cc
MW
2464 */
2465struct tb_switch *
2466tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2467{
2468 struct tb_switch *sw;
2469
2470 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2471 if (!sw)
444ac384 2472 return ERR_PTR(-ENOMEM);
e6b245cc
MW
2473
2474 sw->tb = tb;
2475 sw->config.depth = tb_route_length(route);
2476 sw->config.route_hi = upper_32_bits(route);
2477 sw->config.route_lo = lower_32_bits(route);
2478 sw->safe_mode = true;
2479
2480 device_initialize(&sw->dev);
2481 sw->dev.parent = parent;
2482 sw->dev.bus = &tb_bus_type;
2483 sw->dev.type = &tb_switch_type;
2484 sw->dev.groups = switch_groups;
2485 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2486
2487 return sw;
2488}
2489
bfe778ac
MW
2490/**
2491 * tb_switch_configure() - Uploads configuration to the switch
2492 * @sw: Switch to configure
2493 *
2494 * Call this function before the switch is added to the system. It will
2495 * upload the configuration to the switch and make it available for the
b0407983
MW
2496 * connection manager to use. Can be called for the switch again after
2497 * resume from low power states to re-initialize it.
bfe778ac
MW
2498 *
2499 * Return: %0 in case of success and negative errno in case of failure
2500 */
2501int tb_switch_configure(struct tb_switch *sw)
2502{
2503 struct tb *tb = sw->tb;
2504 u64 route;
2505 int ret;
2506
2507 route = tb_route(sw);
bfe778ac 2508
b0407983 2509 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
b2911a59 2510 sw->config.enabled ? "restoring" : "initializing", route,
b0407983 2511 tb_route_length(route), sw->config.upstream_port_number);
bfe778ac 2512
bfe778ac
MW
2513 sw->config.enabled = 1;
2514
b0407983
MW
2515 if (tb_switch_is_usb4(sw)) {
2516 /*
2517 * For USB4 devices, we need to program the CM version
2518 * accordingly so that it knows to expose all the
2519 * additional capabilities.
2520 */
2521 sw->config.cmuv = USB4_VERSION_1_0;
2522
2523 /* Enumerate the switch */
2524 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2525 ROUTER_CS_1, 4);
2526 if (ret)
2527 return ret;
bfe778ac 2528
b0407983 2529 ret = usb4_switch_setup(sw);
b0407983
MW
2530 } else {
2531 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2532 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2533 sw->config.vendor_id);
2534
2535 if (!sw->cap_plug_events) {
2536 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2537 return -ENODEV;
2538 }
2539
2540 /* Enumerate the switch */
2541 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2542 ROUTER_CS_1, 3);
b0407983 2543 }
e879a709
MW
2544 if (ret)
2545 return ret;
2546
bfe778ac
MW
2547 return tb_plug_events_active(sw, true);
2548}
2549
2cc12751 2550static int tb_switch_set_uuid(struct tb_switch *sw)
bfe778ac 2551{
b0407983 2552 bool uid = false;
bfe778ac 2553 u32 uuid[4];
a9be5582 2554 int ret;
bfe778ac
MW
2555
2556 if (sw->uuid)
a9be5582 2557 return 0;
bfe778ac 2558
b0407983
MW
2559 if (tb_switch_is_usb4(sw)) {
2560 ret = usb4_switch_read_uid(sw, &sw->uid);
2561 if (ret)
2562 return ret;
2563 uid = true;
2564 } else {
2565 /*
2566 * The newer controllers include fused UUID as part of
2567 * link controller specific registers
2568 */
2569 ret = tb_lc_read_uuid(sw, uuid);
2570 if (ret) {
2571 if (ret != -EINVAL)
2572 return ret;
2573 uid = true;
2574 }
2575 }
2576
2577 if (uid) {
bfe778ac
MW
2578 /*
2579 * ICM generates UUID based on UID and fills the upper
2580 * two words with ones. This does not strictly follow the
2581 * UUID format but we want to be compatible with it so
2582 * we do the same here.
2583 */
2584 uuid[0] = sw->uid & 0xffffffff;
2585 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2586 uuid[2] = 0xffffffff;
2587 uuid[3] = 0xffffffff;
2588 }
2589
2590 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2cc12751 2591 if (!sw->uuid)
a9be5582
MW
2592 return -ENOMEM;
2593 return 0;
bfe778ac
MW
2594}
2595
e6b245cc 2596static int tb_switch_add_dma_port(struct tb_switch *sw)
3e136768 2597{
e6b245cc
MW
2598 u32 status;
2599 int ret;
2600
3e136768 2601 switch (sw->generation) {
3e136768
MW
2602 case 2:
2603 /* Only root switch can be upgraded */
2604 if (tb_route(sw))
e6b245cc 2605 return 0;
7a7ebfa8 2606
df561f66 2607 fallthrough;
7a7ebfa8 2608 case 3:
661b1947 2609 case 4:
7a7ebfa8
MW
2610 ret = tb_switch_set_uuid(sw);
2611 if (ret)
2612 return ret;
3e136768
MW
2613 break;
2614
2615 default:
e6b245cc
MW
2616 /*
2617 * DMA port is the only thing available when the switch
2618 * is in safe mode.
2619 */
2620 if (!sw->safe_mode)
2621 return 0;
2622 break;
3e136768
MW
2623 }
2624
661b1947
MW
2625 if (sw->no_nvm_upgrade)
2626 return 0;
2627
2628 if (tb_switch_is_usb4(sw)) {
2629 ret = usb4_switch_nvm_authenticate_status(sw, &status);
2630 if (ret)
2631 return ret;
2632
2633 if (status) {
2634 tb_sw_info(sw, "switch flash authentication failed\n");
2635 nvm_set_auth_status(sw, status);
2636 }
2637
2638 return 0;
2639 }
2640
3f415e5e 2641 /* Root switch DMA port requires running firmware */
f07a3608 2642 if (!tb_route(sw) && !tb_switch_is_icm(sw))
e6b245cc
MW
2643 return 0;
2644
3e136768 2645 sw->dma_port = dma_port_alloc(sw);
e6b245cc
MW
2646 if (!sw->dma_port)
2647 return 0;
2648
7a7ebfa8
MW
2649 /*
2650 * If there is status already set then authentication failed
2651 * when the dma_port_flash_update_auth() returned. Power cycling
2652 * is not needed (it was done already) so the only thing we do here
2653 * is to unblock runtime PM of the root port.
2654 */
2655 nvm_get_auth_status(sw, &status);
2656 if (status) {
2657 if (!tb_route(sw))
b0407983 2658 nvm_authenticate_complete_dma_port(sw);
7a7ebfa8
MW
2659 return 0;
2660 }
2661
e6b245cc
MW
2662 /*
2663 * Check status of the previous flash authentication. If there
2664 * is one we need to power cycle the switch in any case to make
2665 * it functional again.
2666 */
2667 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2668 if (ret <= 0)
2669 return ret;
2670
1830b6ee
MW
2671 /* Now we can allow root port to suspend again */
2672 if (!tb_route(sw))
b0407983 2673 nvm_authenticate_complete_dma_port(sw);
1830b6ee 2674
e6b245cc
MW
2675 if (status) {
2676 tb_sw_info(sw, "switch flash authentication failed\n");
e6b245cc
MW
2677 nvm_set_auth_status(sw, status);
2678 }
2679
2680 tb_sw_info(sw, "power cycling the switch now\n");
2681 dma_port_power_cycle(sw->dma_port);
2682
2683 /*
2684 * We return an error here, which causes adding the switch to fail.
2685 * The switch should appear back after the power cycle is complete.
2686 */
2687 return -ESHUTDOWN;
3e136768
MW
2688}
2689
0d46c08d
MW
2690static void tb_switch_default_link_ports(struct tb_switch *sw)
2691{
2692 int i;
2693
42716425 2694 for (i = 1; i <= sw->config.max_port_number; i++) {
0d46c08d
MW
2695 struct tb_port *port = &sw->ports[i];
2696 struct tb_port *subordinate;
2697
2698 if (!tb_port_is_null(port))
2699 continue;
2700
2701 /* Check for the subordinate port */
2702 if (i == sw->config.max_port_number ||
2703 !tb_port_is_null(&sw->ports[i + 1]))
2704 continue;
2705
2706 /* Link them if not already done (by the DROM) */
2707 subordinate = &sw->ports[i + 1];
2708 if (!port->dual_link_port && !subordinate->dual_link_port) {
2709 port->link_nr = 0;
2710 port->dual_link_port = subordinate;
2711 subordinate->link_nr = 1;
2712 subordinate->dual_link_port = port;
2713
2714 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2715 port->port, subordinate->port);
2716 }
2717 }
2718}
2719
91c0c120
MW
2720static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2721{
2722 const struct tb_port *up = tb_upstream_port(sw);
2723
2724 if (!up->dual_link_port || !up->dual_link_port->remote)
2725 return false;
2726
b0407983
MW
2727 if (tb_switch_is_usb4(sw))
2728 return usb4_switch_lane_bonding_possible(sw);
91c0c120
MW
2729 return tb_lc_lane_bonding_possible(sw);
2730}
2731
2732static int tb_switch_update_link_attributes(struct tb_switch *sw)
2733{
2734 struct tb_port *up;
2735 bool change = false;
2736 int ret;
2737
2738 if (!tb_route(sw) || tb_switch_is_icm(sw))
2739 return 0;
2740
2741 up = tb_upstream_port(sw);
2742
2743 ret = tb_port_get_link_speed(up);
2744 if (ret < 0)
2745 return ret;
2746 if (sw->link_speed != ret)
2747 change = true;
2748 sw->link_speed = ret;
2749
2750 ret = tb_port_get_link_width(up);
2751 if (ret < 0)
2752 return ret;
2753 if (sw->link_width != ret)
2754 change = true;
2755 sw->link_width = ret;
2756
2757 /* Notify userspace that there is a possible link attribute change */
2758 if (device_is_registered(&sw->dev) && change)
2759 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2760
2761 return 0;
2762}
2763
2764/**
2765 * tb_switch_lane_bonding_enable() - Enable lane bonding
2766 * @sw: Switch to enable lane bonding
2767 *
2768 * Connection manager can call this function to enable lane bonding of a
2769 * switch. If conditions are correct and both switches support the feature,
2770 * lanes are bonded. It is safe to call this for any switch.
2771 */
2772int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2773{
2774 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2775 struct tb_port *up, *down;
2776 u64 route = tb_route(sw);
2777 int ret;
2778
2779 if (!route)
2780 return 0;
2781
2782 if (!tb_switch_lane_bonding_possible(sw))
2783 return 0;
2784
2785 up = tb_upstream_port(sw);
2786 down = tb_port_at(route, parent);
2787
2788 if (!tb_port_is_width_supported(up, 2) ||
2789 !tb_port_is_width_supported(down, 2))
2790 return 0;
2791
2792 ret = tb_port_lane_bonding_enable(up);
2793 if (ret) {
2794 tb_port_warn(up, "failed to enable lane bonding\n");
2795 return ret;
2796 }
2797
2798 ret = tb_port_lane_bonding_enable(down);
2799 if (ret) {
2800 tb_port_warn(down, "failed to enable lane bonding\n");
2801 tb_port_lane_bonding_disable(up);
2802 return ret;
2803 }
2804
e7051bea
MW
2805 ret = tb_port_wait_for_link_width(down, 2, 100);
2806 if (ret) {
2807 tb_port_warn(down, "timeout enabling lane bonding\n");
2808 return ret;
2809 }
2810
69fea377
MW
2811 tb_port_update_credits(down);
2812 tb_port_update_credits(up);
91c0c120
MW
2813 tb_switch_update_link_attributes(sw);
2814
2815 tb_sw_dbg(sw, "lane bonding enabled\n");
2816 return ret;
2817}
2818
2819/**
2820 * tb_switch_lane_bonding_disable() - Disable lane bonding
2821 * @sw: Switch whose lane bonding to disable
2822 *
2823 * Disables lane bonding between @sw and parent. This can be called even
2824 * if lanes were not bonded originally.
2825 */
2826void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2827{
2828 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2829 struct tb_port *up, *down;
2830
2831 if (!tb_route(sw))
2832 return;
2833
2834 up = tb_upstream_port(sw);
2835 if (!up->bonded)
2836 return;
2837
2838 down = tb_port_at(tb_route(sw), parent);
2839
2840 tb_port_lane_bonding_disable(up);
2841 tb_port_lane_bonding_disable(down);
2842
e7051bea
MW
2843 /*
2844 * It is fine if we get other errors as the router might have
2845 * been unplugged.
2846 */
2847 if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2848 tb_sw_warn(sw, "timeout disabling lane bonding\n");
2849
69fea377
MW
2850 tb_port_update_credits(down);
2851 tb_port_update_credits(up);
91c0c120 2852 tb_switch_update_link_attributes(sw);
69fea377 2853
91c0c120
MW
2854 tb_sw_dbg(sw, "lane bonding disabled\n");
2855}
2856
de462039
MW
2857/**
2858 * tb_switch_configure_link() - Set link configured
2859 * @sw: Switch whose link is configured
2860 *
2861 * Sets the link upstream from @sw configured (from both ends) so that
2862 * it will not be disconnected when the domain exits sleep. Can be
2863 * called for any switch.
2864 *
2865 * It is recommended that this is called after lane bonding is enabled.
2866 *
2867 * Returns %0 on success and negative errno in case of error.
2868 */
2869int tb_switch_configure_link(struct tb_switch *sw)
2870{
e28178bf
MW
2871 struct tb_port *up, *down;
2872 int ret;
2873
de462039
MW
2874 if (!tb_route(sw) || tb_switch_is_icm(sw))
2875 return 0;
2876
e28178bf
MW
2877 up = tb_upstream_port(sw);
2878 if (tb_switch_is_usb4(up->sw))
2879 ret = usb4_port_configure(up);
2880 else
2881 ret = tb_lc_configure_port(up);
2882 if (ret)
2883 return ret;
2884
2885 down = up->remote;
2886 if (tb_switch_is_usb4(down->sw))
2887 return usb4_port_configure(down);
2888 return tb_lc_configure_port(down);
de462039
MW
2889}
2890
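/*
 * Illustrative sketch (not part of the original file): the recommended
 * ordering when bringing up the upstream link of a newly enumerated
 * router, i.e. enabling lane bonding before marking the link configured.
 * The helper name example_bring_up_link() is hypothetical.
 */
static void __maybe_unused example_bring_up_link(struct tb_switch *sw)
{
	/* A bonding failure is not fatal, the link just stays at one lane */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	/* Keep the link up across sleep now that it is fully set up */
	if (tb_switch_configure_link(sw))
		tb_sw_warn(sw, "failed to configure link\n");
}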
2891/**
2892 * tb_switch_unconfigure_link() - Unconfigure link
2893 * @sw: Switch whose link is unconfigured
2894 *
2895 * Sets the link unconfigured so the @sw will be disconnected if the
2896 * domain exits sleep.
2897 */
2898void tb_switch_unconfigure_link(struct tb_switch *sw)
2899{
e28178bf
MW
2900 struct tb_port *up, *down;
2901
de462039
MW
2902 if (sw->is_unplugged)
2903 return;
2904 if (!tb_route(sw) || tb_switch_is_icm(sw))
2905 return;
2906
e28178bf
MW
2907 up = tb_upstream_port(sw);
2908 if (tb_switch_is_usb4(up->sw))
2909 usb4_port_unconfigure(up);
2910 else
2911 tb_lc_unconfigure_port(up);
2912
2913 down = up->remote;
2914 if (tb_switch_is_usb4(down->sw))
2915 usb4_port_unconfigure(down);
de462039 2916 else
e28178bf 2917 tb_lc_unconfigure_port(down);
de462039
MW
2918}
2919
56ad3aef
MW
2920static void tb_switch_credits_init(struct tb_switch *sw)
2921{
2922 if (tb_switch_is_icm(sw))
2923 return;
2924 if (!tb_switch_is_usb4(sw))
2925 return;
2926 if (usb4_switch_credits_init(sw))
2927 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2928}
2929
bfe778ac
MW
2930/**
2931 * tb_switch_add() - Add a switch to the domain
2932 * @sw: Switch to add
2933 *
2934 * This is the last step in adding a switch to the domain. It reads
2935 * identification information from the DROM and initializes ports so that
2936 * they can be used to connect other switches. The switch will be
2937 * exposed to the userspace when this function successfully returns. To
2938 * remove and release the switch, call tb_switch_remove().
2939 *
2940 * Return: %0 in case of success and negative errno in case of failure
2941 */
2942int tb_switch_add(struct tb_switch *sw)
2943{
2944 int i, ret;
2945
3e136768
MW
2946 /*
2947 * Initialize DMA control port now before we read DROM. Recent
2948 * host controllers have more complete DROM on NVM that includes
2949 * vendor and model identification strings which we then expose
2950 * to the userspace. NVM can be accessed through DMA
2951 * configuration based mailbox.
2952 */
e6b245cc 2953 ret = tb_switch_add_dma_port(sw);
af99f696
MW
2954 if (ret) {
2955 dev_err(&sw->dev, "failed to add DMA port\n");
f53e7676 2956 return ret;
af99f696 2957 }
343fcb8c 2958
e6b245cc 2959 if (!sw->safe_mode) {
56ad3aef
MW
2960 tb_switch_credits_init(sw);
2961
e6b245cc
MW
2962 /* read drom */
2963 ret = tb_drom_read(sw);
6915812b
ML
2964 if (ret)
2965 dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
daa5140f 2966 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
bfe778ac 2967
e23a5afd
MW
2968 tb_check_quirks(sw);
2969
2cc12751 2970 ret = tb_switch_set_uuid(sw);
af99f696
MW
2971 if (ret) {
2972 dev_err(&sw->dev, "failed to set UUID\n");
2cc12751 2973 return ret;
af99f696 2974 }
e6b245cc
MW
2975
2976 for (i = 0; i <= sw->config.max_port_number; i++) {
2977 if (sw->ports[i].disabled) {
daa5140f 2978 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
e6b245cc
MW
2979 continue;
2980 }
2981 ret = tb_init_port(&sw->ports[i]);
af99f696
MW
2982 if (ret) {
2983 dev_err(&sw->dev, "failed to initialize port %d\n", i);
e6b245cc 2984 return ret;
af99f696 2985 }
343fcb8c 2986 }
91c0c120 2987
0d46c08d
MW
2988 tb_switch_default_link_ports(sw);
2989
91c0c120
MW
2990 ret = tb_switch_update_link_attributes(sw);
2991 if (ret)
2992 return ret;
cf29b9af
RM
2993
2994 ret = tb_switch_tmu_init(sw);
2995 if (ret)
2996 return ret;
343fcb8c
AN
2997 }
2998
e6b245cc 2999 ret = device_add(&sw->dev);
af99f696
MW
3000 if (ret) {
3001 dev_err(&sw->dev, "failed to add device: %d\n", ret);
e6b245cc 3002 return ret;
af99f696 3003 }
e6b245cc 3004
a83bc4a5
MW
3005 if (tb_route(sw)) {
3006 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
3007 sw->vendor, sw->device);
3008 if (sw->vendor_name && sw->device_name)
3009 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
3010 sw->device_name);
3011 }
3012
cae5f515
MW
3013 ret = usb4_switch_add_ports(sw);
3014 if (ret) {
3015 dev_err(&sw->dev, "failed to add USB4 ports\n");
3016 goto err_del;
3017 }
3018
e6b245cc 3019 ret = tb_switch_nvm_add(sw);
2d8ff0b5 3020 if (ret) {
af99f696 3021 dev_err(&sw->dev, "failed to add NVM devices\n");
cae5f515 3022 goto err_ports;
2d8ff0b5 3023 }
e6b245cc 3024
b2911a59
MW
3025 /*
3026 * Thunderbolt routers do not generate wakeups themselves but
3027 * they forward wakeups from tunneled protocols, so enable it
3028 * here.
3029 */
3030 device_init_wakeup(&sw->dev, true);
3031
2d8ff0b5
MW
3032 pm_runtime_set_active(&sw->dev);
3033 if (sw->rpm) {
3034 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
3035 pm_runtime_use_autosuspend(&sw->dev);
3036 pm_runtime_mark_last_busy(&sw->dev);
3037 pm_runtime_enable(&sw->dev);
3038 pm_request_autosuspend(&sw->dev);
3039 }
3040
54e41810 3041 tb_switch_debugfs_init(sw);
2d8ff0b5 3042 return 0;
cae5f515
MW
3043
3044err_ports:
3045 usb4_switch_remove_ports(sw);
3046err_del:
3047 device_del(&sw->dev);
3048
3049 return ret;
bfe778ac 3050}
c90553b3 3051
bfe778ac
MW
3052/**
3053 * tb_switch_remove() - Remove and release a switch
3054 * @sw: Switch to remove
3055 *
3056 * This will remove the switch from the domain and release it once its
3057 * reference count drops to zero. If there are switches connected below
3058 * this switch, they will be removed as well.
3059 */
3060void tb_switch_remove(struct tb_switch *sw)
3061{
b433d010 3062 struct tb_port *port;
ca389f71 3063
54e41810
GF
3064 tb_switch_debugfs_remove(sw);
3065
2d8ff0b5
MW
3066 if (sw->rpm) {
3067 pm_runtime_get_sync(&sw->dev);
3068 pm_runtime_disable(&sw->dev);
3069 }
3070
bfe778ac 3071 /* port 0 is the switch itself and never has a remote */
b433d010
MW
3072 tb_switch_for_each_port(sw, port) {
3073 if (tb_port_has_remote(port)) {
3074 tb_switch_remove(port->remote->sw);
3075 port->remote = NULL;
3076 } else if (port->xdomain) {
3077 tb_xdomain_remove(port->xdomain);
3078 port->xdomain = NULL;
dfe40ca4 3079 }
dacb1287
KK
3080
3081 /* Remove any downstream retimers */
3082 tb_retimer_remove_all(port);
bfe778ac
MW
3083 }
3084
3085 if (!sw->is_unplugged)
3086 tb_plug_events_active(sw, false);
b0407983 3087
e6b245cc 3088 tb_switch_nvm_remove(sw);
cae5f515 3089 usb4_switch_remove_ports(sw);
a83bc4a5
MW
3090
3091 if (tb_route(sw))
3092 dev_info(&sw->dev, "device disconnected\n");
bfe778ac 3093 device_unregister(&sw->dev);
a25c8b2f
AN
3094}
3095
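/*
 * Illustrative sketch (not part of the original file): how a connection
 * manager might react to an unplug event by removing the switch behind
 * the port, mirroring the cleanup done in tb_switch_remove() itself. The
 * helper name example_handle_unplug() is hypothetical.
 */
static void __maybe_unused example_handle_unplug(struct tb_port *port)
{
	if (tb_port_has_remote(port)) {
		tb_switch_remove(port->remote->sw);
		port->remote = NULL;
	}
}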
053596d9 3096/**
aae20bb6 3097 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
5c6b471b 3098 * @sw: Router to mark unplugged
053596d9 3099 */
aae20bb6 3100void tb_sw_set_unplugged(struct tb_switch *sw)
053596d9 3101{
b433d010
MW
3102 struct tb_port *port;
3103
053596d9
AN
3104 if (sw == sw->tb->root_switch) {
3105 tb_sw_WARN(sw, "cannot unplug root switch\n");
3106 return;
3107 }
3108 if (sw->is_unplugged) {
3109 tb_sw_WARN(sw, "is_unplugged already set\n");
3110 return;
3111 }
3112 sw->is_unplugged = true;
b433d010
MW
3113 tb_switch_for_each_port(sw, port) {
3114 if (tb_port_has_remote(port))
3115 tb_sw_set_unplugged(port->remote->sw);
3116 else if (port->xdomain)
3117 port->xdomain->is_unplugged = true;
053596d9
AN
3118 }
3119}
3120
b2911a59
MW
3121static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
3122{
3123 if (flags)
3124 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
3125 else
3126 tb_sw_dbg(sw, "disabling wakeup\n");
3127
3128 if (tb_switch_is_usb4(sw))
3129 return usb4_switch_set_wake(sw, flags);
3130 return tb_lc_set_wake(sw, flags);
3131}
3132
23dd5bb4
AN
3133int tb_switch_resume(struct tb_switch *sw)
3134{
b433d010
MW
3135 struct tb_port *port;
3136 int err;
3137
daa5140f 3138 tb_sw_dbg(sw, "resuming switch\n");
23dd5bb4 3139
08a5e4ce
MW
3140 /*
3141 * Check for UID of the connected switches except for root
3142 * switch which we assume cannot be removed.
3143 */
3144 if (tb_route(sw)) {
3145 u64 uid;
3146
7ea4cd6b
MW
3147 /*
3148 * Check first that we can still read the switch config
3149 * space. It may be that there is now another domain
3150 * connected.
3151 */
3152 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
3153 if (err < 0) {
3154 tb_sw_info(sw, "switch not present anymore\n");
3155 return err;
3156 }
3157
a283de3e
ML
3158 /* We don't have any way to confirm this was the same device */
3159 if (!sw->uid)
3160 return -ENODEV;
3161
b0407983
MW
3162 if (tb_switch_is_usb4(sw))
3163 err = usb4_switch_read_uid(sw, &uid);
3164 else
3165 err = tb_drom_read_uid_only(sw, &uid);
08a5e4ce
MW
3166 if (err) {
3167 tb_sw_warn(sw, "uid read failed\n");
3168 return err;
3169 }
3170 if (sw->uid != uid) {
3171 tb_sw_info(sw,
3172 "changed while suspended (uid %#llx -> %#llx)\n",
3173 sw->uid, uid);
3174 return -ENODEV;
3175 }
23dd5bb4
AN
3176 }
3177
b0407983 3178 err = tb_switch_configure(sw);
23dd5bb4
AN
3179 if (err)
3180 return err;
3181
b2911a59
MW
3182 /* Disable wakes */
3183 tb_switch_set_wake(sw, 0);
3184
8145c435
MW
3185 err = tb_switch_tmu_init(sw);
3186 if (err)
3187 return err;
3188
23dd5bb4 3189 /* check for surviving downstream switches */
b433d010 3190 tb_switch_for_each_port(sw, port) {
3fb10ea4
RM
3191 if (!tb_port_is_null(port))
3192 continue;
3193
3194 if (!tb_port_resume(port))
23dd5bb4 3195 continue;
dfe40ca4 3196
7ea4cd6b 3197 if (tb_wait_for_port(port, true) <= 0) {
23dd5bb4
AN
3198 tb_port_warn(port,
3199 "lost during suspend, disconnecting\n");
7ea4cd6b
MW
3200 if (tb_port_has_remote(port))
3201 tb_sw_set_unplugged(port->remote->sw);
3202 else if (port->xdomain)
3203 port->xdomain->is_unplugged = true;
3fb10ea4 3204 } else {
b0407983
MW
3205 /*
3206 * Always unlock the port so the downstream
3207 * switch/domain is accessible.
3208 */
3209 if (tb_port_unlock(port))
3210 tb_port_warn(port, "failed to unlock port\n");
3211 if (port->remote && tb_switch_resume(port->remote->sw)) {
7ea4cd6b
MW
3212 tb_port_warn(port,
3213 "lost during suspend, disconnecting\n");
3214 tb_sw_set_unplugged(port->remote->sw);
3215 }
23dd5bb4
AN
3216 }
3217 }
3218 return 0;
3219}
3220
6ac6faee
MW
3221/**
3222 * tb_switch_suspend() - Put a switch to sleep
3223 * @sw: Switch to suspend
3224 * @runtime: Is this runtime suspend or system sleep
3225 *
3226 * Suspends the router and all its children. Enables wakes according to
3227 * the value of @runtime and then sets the sleep bit for the router. If
3228 * @sw is the host router, the domain is ready to go to sleep once this
3229 * function returns.
3230 */
3231void tb_switch_suspend(struct tb_switch *sw, bool runtime)
23dd5bb4 3232{
b2911a59 3233 unsigned int flags = 0;
b433d010
MW
3234 struct tb_port *port;
3235 int err;
3236
6ac6faee
MW
3237 tb_sw_dbg(sw, "suspending switch\n");
3238
43f977bc
GF
3239 /*
3240 * Actually only needed for Titan Ridge but for simplicity can be
3241 * done for USB4 devices too as CLx is re-enabled at resume.
b017a46d 3242 * CL0s and CL1 are enabled and supported together.
43f977bc 3243 */
b017a46d
GF
3244 if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
3245 if (tb_switch_disable_clx(sw, TB_CL1))
3246 tb_sw_warn(sw, "failed to disable %s on upstream port\n",
3247 tb_switch_clx_name(TB_CL1));
418a5a3d 3248 }
43f977bc 3249
23dd5bb4
AN
3250 err = tb_plug_events_active(sw, false);
3251 if (err)
3252 return;
3253
b433d010
MW
3254 tb_switch_for_each_port(sw, port) {
3255 if (tb_port_has_remote(port))
6ac6faee 3256 tb_switch_suspend(port->remote->sw, runtime);
23dd5bb4 3257 }
5480dfc2 3258
6ac6faee
MW
3259 if (runtime) {
3260 /* Trigger wake when something is plugged in/out */
3261 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
6026b703
MW
3262 flags |= TB_WAKE_ON_USB4;
3263 flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
6ac6faee
MW
3264 } else if (device_may_wakeup(&sw->dev)) {
3265 flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3266 }
b2911a59
MW
3267
3268 tb_switch_set_wake(sw, flags);
3269
b0407983
MW
3270 if (tb_switch_is_usb4(sw))
3271 usb4_switch_set_sleep(sw);
3272 else
3273 tb_lc_set_sleep(sw);
23dd5bb4 3274}
f67cf491 3275
8afe909b
MW
3276/**
3277 * tb_switch_query_dp_resource() - Query availability of DP resource
3278 * @sw: Switch whose DP resource is queried
3279 * @in: DP IN port
3280 *
3281 * Queries availability of DP resource for DP tunneling using switch
3282 * specific means. Returns %true if the resource is available.
3283 */
3284bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3285{
b0407983
MW
3286 if (tb_switch_is_usb4(sw))
3287 return usb4_switch_query_dp_resource(sw, in);
8afe909b
MW
3288 return tb_lc_dp_sink_query(sw, in);
3289}
3290
3291/**
3292 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3293 * @sw: Switch whose DP resource is allocated
3294 * @in: DP IN port
3295 *
3296 * Allocates DP resource for DP tunneling. The resource must be
3297 * available for this to succeed (see tb_switch_query_dp_resource()).
3298 * Returns %0 on success and negative errno otherwise.
3299 */
3300int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3301{
ce05b997
MW
3302 int ret;
3303
b0407983 3304 if (tb_switch_is_usb4(sw))
ce05b997
MW
3305 ret = usb4_switch_alloc_dp_resource(sw, in);
3306 else
3307 ret = tb_lc_dp_sink_alloc(sw, in);
3308
3309 if (ret)
3310 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3311 in->port);
3312 else
3313 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3314
3315 return ret;
8afe909b
MW
3316}
3317
3318/**
3319 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3320 * @sw: Switch whose DP resource is de-allocated
3321 * @in: DP IN port
3322 *
3323 * De-allocates DP resource that was previously allocated for DP
3324 * tunneling.
3325 */
3326void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3327{
b0407983
MW
3328 int ret;
3329
3330 if (tb_switch_is_usb4(sw))
3331 ret = usb4_switch_dealloc_dp_resource(sw, in);
3332 else
3333 ret = tb_lc_dp_sink_dealloc(sw, in);
3334
3335 if (ret)
8afe909b
MW
3336 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3337 in->port);
ce05b997
MW
3338 else
3339 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
8afe909b
MW
3340}
3341
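/*
 * Illustrative sketch (not part of the original file): the expected
 * pairing of the DP resource helpers when setting up a DP tunnel. A
 * successful tb_switch_alloc_dp_resource() must later be balanced with
 * tb_switch_dealloc_dp_resource(). The helper name example_reserve_dp_in()
 * is hypothetical.
 */
static int __maybe_unused example_reserve_dp_in(struct tb_switch *sw,
						struct tb_port *in)
{
	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;

	return tb_switch_alloc_dp_resource(sw, in);
}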
f67cf491
MW
3342struct tb_sw_lookup {
3343 struct tb *tb;
3344 u8 link;
3345 u8 depth;
7c39ffe7 3346 const uuid_t *uuid;
8e9267bb 3347 u64 route;
f67cf491
MW
3348};
3349
418e3ea1 3350static int tb_switch_match(struct device *dev, const void *data)
f67cf491
MW
3351{
3352 struct tb_switch *sw = tb_to_switch(dev);
418e3ea1 3353 const struct tb_sw_lookup *lookup = data;
f67cf491
MW
3354
3355 if (!sw)
3356 return 0;
3357 if (sw->tb != lookup->tb)
3358 return 0;
3359
3360 if (lookup->uuid)
3361 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3362
8e9267bb
RM
3363 if (lookup->route) {
3364 return sw->config.route_lo == lower_32_bits(lookup->route) &&
3365 sw->config.route_hi == upper_32_bits(lookup->route);
3366 }
3367
f67cf491
MW
3368 /* Root switch is matched only by depth */
3369 if (!lookup->depth)
3370 return !sw->depth;
3371
3372 return sw->link == lookup->link && sw->depth == lookup->depth;
3373}
3374
3375/**
3376 * tb_switch_find_by_link_depth() - Find switch by link and depth
3377 * @tb: Domain the switch belongs
3378 * @link: Link number the switch is connected
3379 * @depth: Depth of the switch in link
3380 *
3381 * Returned switch has reference count increased so the caller needs to
3382 * call tb_switch_put() when done with the switch.
3383 */
3384struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3385{
3386 struct tb_sw_lookup lookup;
3387 struct device *dev;
3388
3389 memset(&lookup, 0, sizeof(lookup));
3390 lookup.tb = tb;
3391 lookup.link = link;
3392 lookup.depth = depth;
3393
3394 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3395 if (dev)
3396 return tb_to_switch(dev);
3397
3398 return NULL;
3399}
3400
3401/**
432019d6 3402 * tb_switch_find_by_uuid() - Find switch by UUID
f67cf491
MW
3403 * @tb: Domain the switch belongs
3404 * @uuid: UUID to look for
3405 *
3406 * Returned switch has reference count increased so the caller needs to
3407 * call tb_switch_put() when done with the switch.
3408 */
7c39ffe7 3409struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
f67cf491
MW
3410{
3411 struct tb_sw_lookup lookup;
3412 struct device *dev;
3413
3414 memset(&lookup, 0, sizeof(lookup));
3415 lookup.tb = tb;
3416 lookup.uuid = uuid;
3417
3418 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3419 if (dev)
3420 return tb_to_switch(dev);
3421
3422 return NULL;
3423}
e6b245cc 3424
8e9267bb
RM
3425/**
3426 * tb_switch_find_by_route() - Find switch by route string
3427 * @tb: Domain the switch belongs
3428 * @route: Route string to look for
3429 *
3430 * Returned switch has reference count increased so the caller needs to
3431 * call tb_switch_put() when done with the switch.
3432 */
3433struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
3434{
3435 struct tb_sw_lookup lookup;
3436 struct device *dev;
3437
3438 if (!route)
3439 return tb_switch_get(tb->root_switch);
3440
3441 memset(&lookup, 0, sizeof(lookup));
3442 lookup.tb = tb;
3443 lookup.route = route;
3444
3445 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3446 if (dev)
3447 return tb_to_switch(dev);
3448
3449 return NULL;
3450}
3451
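/*
 * Illustrative sketch (not part of the original file): the lookup helpers
 * above return a reference-counted switch, so the caller must balance a
 * successful lookup with tb_switch_put(). The function name
 * example_log_switch() is hypothetical.
 */
static void __maybe_unused example_log_switch(struct tb *tb, u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_route(tb, route);
	if (!sw)
		return;

	tb_sw_dbg(sw, "found switch at route %llx\n", route);
	tb_switch_put(sw);
}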
386e5e29
MW
3452/**
3453 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3454 * @sw: Switch to find the port from
3455 * @type: Port type to look for
3456 */
3457struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3458 enum tb_port_type type)
3459{
3460 struct tb_port *port;
3461
3462 tb_switch_for_each_port(sw, port) {
3463 if (port->config.type == type)
3464 return port;
3465 }
3466
3467 return NULL;
3468}
8a90e4fa 3469
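/*
 * Illustrative sketch (not part of the original file): using
 * tb_switch_find_port() to locate the first DP IN adapter of a router,
 * assuming the TB_TYPE_DP_HDMI_IN port type from tb_regs.h. The helper
 * name example_first_dp_in() is hypothetical.
 */
static struct tb_port * __maybe_unused example_first_dp_in(struct tb_switch *sw)
{
	return tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN);
}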
8a90e4fa
GF
3470static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
3471{
3472 struct tb_switch *parent = tb_switch_parent(sw);
3473 struct tb_port *up, *down;
3474 int ret;
3475
3476 if (!tb_route(sw))
3477 return 0;
3478
3479 up = tb_upstream_port(sw);
3480 down = tb_port_at(tb_route(sw), parent);
3481 ret = tb_port_pm_secondary_enable(up);
3482 if (ret)
3483 return ret;
3484
3485 return tb_port_pm_secondary_disable(down);
3486}
3487
b017a46d 3488static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
8a90e4fa
GF
3489{
3490 struct tb_switch *parent = tb_switch_parent(sw);
b017a46d 3491 bool up_clx_support, down_clx_support;
8a90e4fa
GF
3492 struct tb_port *up, *down;
3493 int ret;
3494
43f977bc 3495 if (!tb_switch_is_clx_supported(sw))
8a90e4fa
GF
3496 return 0;
3497
3498 /*
3499 * Enable CLx for host router's downstream port as part of the
3500 * downstream router enabling procedure.
3501 */
3502 if (!tb_route(sw))
3503 return 0;
3504
3505 /* Enable CLx only for first hop router (depth = 1) */
3506 if (tb_route(parent))
3507 return 0;
3508
3509 ret = tb_switch_pm_secondary_resolve(sw);
3510 if (ret)
3511 return ret;
3512
3513 up = tb_upstream_port(sw);
3514 down = tb_port_at(tb_route(sw), parent);
3515
b017a46d
GF
3516 up_clx_support = tb_port_clx_supported(up, clx);
3517 down_clx_support = tb_port_clx_supported(down, clx);
8a90e4fa 3518
b017a46d
GF
3519 tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
3520 up_clx_support ? "" : "not ");
3521 tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
3522 down_clx_support ? "" : "not ");
8a90e4fa 3523
b017a46d 3524 if (!up_clx_support || !down_clx_support)
8a90e4fa
GF
3525 return -EOPNOTSUPP;
3526
b017a46d 3527 ret = tb_port_clx_enable(up, clx);
8a90e4fa
GF
3528 if (ret)
3529 return ret;
3530
b017a46d 3531 ret = tb_port_clx_enable(down, clx);
8a90e4fa 3532 if (ret) {
b017a46d 3533 tb_port_clx_disable(up, clx);
8a90e4fa
GF
3534 return ret;
3535 }
3536
43f977bc
GF
3537 ret = tb_switch_mask_clx_objections(sw);
3538 if (ret) {
b017a46d
GF
3539 tb_port_clx_disable(up, clx);
3540 tb_port_clx_disable(down, clx);
43f977bc
GF
3541 return ret;
3542 }
3543
b017a46d 3544 sw->clx = clx;
8a90e4fa 3545
b017a46d 3546 tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
8a90e4fa
GF
3547 return 0;
3548}
3549
3550/**
3551 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
3552 * @sw: Router to enable CLx for
3553 * @clx: The CLx state to enable
3554 *
3555 * Enable CLx state only for first hop router. That is the most common
3556 * use-case, that is intended for better thermal management, and so helps
3557 * to improve performance. CLx is enabled only if both sides of the link
3558 * support CLx, and if both sides of the link are not configured as two
3559 * single lane links and only if the link is not inter-domain link. The
b4e08d5d 3560 * complete set of conditions is described in CM Guide 1.0 section 8.1.
8a90e4fa
GF
3561 *
3562 * Return: Returns 0 on success or an error code on failure.
3563 */
3564int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
3565{
3566 struct tb_switch *root_sw = sw->tb->root_switch;
3567
fa487b2a
GF
3568 if (!clx_enabled)
3569 return 0;
3570
8a90e4fa
GF
3571 /*
3572 * CLx is not enabled and validated on Intel USB4 platforms before
3573 * Alder Lake.
3574 */
3575 if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
3576 return 0;
3577
3578 switch (clx) {
b017a46d
GF
3579 case TB_CL1:
3580 /* CL0s and CL1 are enabled and supported together */
3581 return __tb_switch_enable_clx(sw, clx);
8a90e4fa
GF
3582
3583 default:
3584 return -EOPNOTSUPP;
3585 }
3586}
3587
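/*
 * Illustrative sketch (not part of the original file): a connection
 * manager may try to enable CL1 (which implies CL0s) once the first
 * depth router has been enumerated; anything other than TB_CL1 is
 * rejected with -EOPNOTSUPP above. The helper name
 * example_enable_low_power() is hypothetical.
 */
static void __maybe_unused example_enable_low_power(struct tb_switch *sw)
{
	int ret;

	ret = tb_switch_enable_clx(sw, TB_CL1);
	if (ret && ret != -EOPNOTSUPP)
		tb_sw_warn(sw, "failed to enable %s\n",
			   tb_switch_clx_name(TB_CL1));
}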
b017a46d 3588static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
8a90e4fa
GF
3589{
3590 struct tb_switch *parent = tb_switch_parent(sw);
3591 struct tb_port *up, *down;
3592 int ret;
3593
43f977bc 3594 if (!tb_switch_is_clx_supported(sw))
8a90e4fa
GF
3595 return 0;
3596
3597 /*
3598 * Disable CLx for host router's downstream port as part of the
3599 * downstream router disabling procedure.
3600 */
3601 if (!tb_route(sw))
3602 return 0;
3603
3604 /* Disable CLx only for first hop router (depth = 1) */
3605 if (tb_route(parent))
3606 return 0;
3607
3608 up = tb_upstream_port(sw);
3609 down = tb_port_at(tb_route(sw), parent);
b017a46d 3610 ret = tb_port_clx_disable(up, clx);
8a90e4fa
GF
3611 if (ret)
3612 return ret;
3613
b017a46d 3614 ret = tb_port_clx_disable(down, clx);
8a90e4fa
GF
3615 if (ret)
3616 return ret;
3617
3618 sw->clx = TB_CLX_DISABLE;
3619
b017a46d 3620 tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
8a90e4fa
GF
3621 return 0;
3622}
3623
3624/**
3625 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
3626 * @sw: Router to disable CLx for
3627 * @clx: The CLx state to disable
3628 *
3629 * Return: Returns 0 on success or an error code on failure.
3630 */
3631int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
3632{
fa487b2a
GF
3633 if (!clx_enabled)
3634 return 0;
3635
8a90e4fa 3636 switch (clx) {
b017a46d
GF
3637 case TB_CL1:
3638 /* CL0s and CL1 are enabled and supported together */
3639 return __tb_switch_disable_clx(sw, clx);
8a90e4fa
GF
3640
3641 default:
3642 return -EOPNOTSUPP;
3643 }
3644}
43f977bc
GF
3645
3646/**
3647 * tb_switch_mask_clx_objections() - Mask CLx objections for a router
3648 * @sw: Router to mask objections for
3649 *
3650 * Mask the objections coming from the second depth routers in order to
3651 * stop these objections from interfering with the CLx states of the first
3652 * depth link.
3653 */
3654int tb_switch_mask_clx_objections(struct tb_switch *sw)
3655{
3656 int up_port = sw->config.upstream_port_number;
3657 u32 offset, val[2], mask_obj, unmask_obj;
3658 int ret, i;
3659
3660 /* Only Titan Ridge of pre-USB4 devices support CLx states */
3661 if (!tb_switch_is_titan_ridge(sw))
3662 return 0;
3663
3664 if (!tb_route(sw))
3665 return 0;
3666
3667 /*
3668 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
3669 * Port A consists of lane adapters 1,2 and
3670 * Port B consists of lane adapters 3,4
3671 * If upstream port is A, (lanes are 1,2), we mask objections from
3672 * port B (lanes 3,4) and unmask objections from Port A and vice-versa.
3673 */
3674 if (up_port == 1) {
3675 mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
3676 unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
3677 offset = TB_LOW_PWR_C1_CL1;
3678 } else {
3679 mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
3680 unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
3681 offset = TB_LOW_PWR_C3_CL1;
3682 }
3683
3684 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
3685 sw->cap_lp + offset, ARRAY_SIZE(val));
3686 if (ret)
3687 return ret;
3688
3689 for (i = 0; i < ARRAY_SIZE(val); i++) {
3690 val[i] |= mask_obj;
3691 val[i] &= ~unmask_obj;
3692 }
3693
3694 return tb_sw_write(sw, &val, TB_CFG_SWITCH,
3695 sw->cap_lp + offset, ARRAY_SIZE(val));
3696}
3697
3698/*
3699 * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3
3700 * device. For now used only for Titan Ridge.
3701 */
3702static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
3703 unsigned int pcie_offset, u32 value)
3704{
3705 u32 offset, command, val;
3706 int ret;
3707
3708 if (sw->generation != 3)
3709 return -EOPNOTSUPP;
3710
3711 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
3712 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
3713 if (ret)
3714 return ret;
3715
3716 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
3717 command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
3718 command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
3719 command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
3720 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
3721 command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
3722
3723 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
3724
3725 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
3726 if (ret)
3727 return ret;
3728
3729 ret = tb_switch_wait_for_bit(sw, offset,
3730 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
3731 if (ret)
3732 return ret;
3733
3734 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
3735 if (ret)
3736 return ret;
3737
3738 if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
3739 return -ETIMEDOUT;
3740
3741 return 0;
3742}
3743
3744/**
3745 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
3746 * @sw: Router to enable PCIe L1
3747 *
3748 * For a Titan Ridge switch to enter a CLx state, its PCIe bridges shall
3749 * enable entry to the PCIe L1 state. Shall be called after the upstream
3750 * PCIe tunnel has been configured. Due to an Intel platform limitation,
3751 * this shall be called only for the first hop switch.
3752 */
3753int tb_switch_pcie_l1_enable(struct tb_switch *sw)
3754{
3755 struct tb_switch *parent = tb_switch_parent(sw);
3756 int ret;
3757
3758 if (!tb_route(sw))
3759 return 0;
3760
3761 if (!tb_switch_is_titan_ridge(sw))
3762 return 0;
3763
3764 /* Enable PCIe L1 enable only for first hop router (depth = 1) */
3765 if (tb_route(parent))
3766 return 0;
3767
3768 /* Write to downstream PCIe bridge #5 aka Dn4 */
3769 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
3770 if (ret)
3771 return ret;
3772
3773 /* Write to Upstream PCIe bridge #0 aka Up0 */
3774 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
3775}
30a4eca6
MW
3776
3777/**
3778 * tb_switch_xhci_connect() - Connect internal xHCI
3779 * @sw: Router whose xHCI to connect
3780 *
3781 * Can be called to any router. For Alpine Ridge and Titan Ridge
3782 * performs special flows that bring the xHCI functional for any device
3783 * connected to the type-C port. Call only after PCIe tunnel has been
3784 * established. The function only does the connect if not done already
3785 * so can be called several times for the same router.
3786 */
3787int tb_switch_xhci_connect(struct tb_switch *sw)
3788{
3789 bool usb_port1, usb_port3, xhci_port1, xhci_port3;
3790 struct tb_port *port1, *port3;
3791 int ret;
3792
3793 port1 = &sw->ports[1];
3794 port3 = &sw->ports[3];
3795
3796 if (tb_switch_is_alpine_ridge(sw)) {
3797 usb_port1 = tb_lc_is_usb_plugged(port1);
3798 usb_port3 = tb_lc_is_usb_plugged(port3);
3799 xhci_port1 = tb_lc_is_xhci_connected(port1);
3800 xhci_port3 = tb_lc_is_xhci_connected(port3);
3801
3802 /* Figure out correct USB port to connect */
3803 if (usb_port1 && !xhci_port1) {
3804 ret = tb_lc_xhci_connect(port1);
3805 if (ret)
3806 return ret;
3807 }
3808 if (usb_port3 && !xhci_port3)
3809 return tb_lc_xhci_connect(port3);
3810 } else if (tb_switch_is_titan_ridge(sw)) {
3811 ret = tb_lc_xhci_connect(port1);
3812 if (ret)
3813 return ret;
3814 return tb_lc_xhci_connect(port3);
3815 }
3816
3817 return 0;
3818}
3819
3820/**
3821 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
3822 * @sw: Router whose xHCI to disconnect
3823 *
3824 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
3825 * ports.
3826 */
3827void tb_switch_xhci_disconnect(struct tb_switch *sw)
3828{
3829 if (sw->generation == 3) {
3830 struct tb_port *port1 = &sw->ports[1];
3831 struct tb_port *port3 = &sw->ports[3];
3832
3833 tb_lc_xhci_disconnect(port1);
3834 tb_port_dbg(port1, "disconnected xHCI\n");
3835 tb_lc_xhci_disconnect(port3);
3836 tb_port_dbg(port3, "disconnected xHCI\n");
3837 }
3838}