thunderbolt: Pass TX and RX data directly to usb4_switch_op()
drivers/thunderbolt/usb4.c
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * USB4 specific functionality
4 *
5 * Copyright (C) 2019, Intel Corporation
6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7 * Rajmohan Mani <rajmohan.mani@intel.com>
8 */
9
10#include <linux/delay.h>
11#include <linux/ktime.h>
12
 13#include "sb_regs.h"
14#include "tb.h"
15
16#define USB4_DATA_DWORDS 16
17#define USB4_DATA_RETRIES 3
18
19enum usb4_switch_op {
20 USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
21 USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
22 USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
23 USB4_SWITCH_OP_NVM_WRITE = 0x20,
24 USB4_SWITCH_OP_NVM_AUTH = 0x21,
25 USB4_SWITCH_OP_NVM_READ = 0x22,
26 USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
27 USB4_SWITCH_OP_DROM_READ = 0x24,
28 USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
29};
30
31enum usb4_sb_target {
32 USB4_SB_TARGET_ROUTER,
33 USB4_SB_TARGET_PARTNER,
34 USB4_SB_TARGET_RETIMER,
35};
36
37#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
38#define USB4_NVM_READ_OFFSET_SHIFT 2
39#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
40#define USB4_NVM_READ_LENGTH_SHIFT 24
41
42#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
43#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
44
45#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
46#define USB4_DROM_ADDRESS_SHIFT 2
47#define USB4_DROM_SIZE_MASK GENMASK(19, 15)
48#define USB4_DROM_SIZE_SHIFT 15
49
50#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
51
52typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
53typedef int (*write_block_fn)(void *, const void *, size_t);
54
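/*
 * Poll a bit field in router config space until it reads back as @value
 * or @timeout_msec expires. Used below to wait for the operation valid
 * (OV) bit to clear and for the various "ready" bits to be asserted.
 */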
55static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
56 u32 value, int timeout_msec)
57{
58 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
59
60 do {
61 u32 val;
62 int ret;
63
64 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
65 if (ret)
66 return ret;
67
68 if ((val & bit) == value)
69 return 0;
70
71 usleep_range(50, 100);
72 } while (ktime_before(ktime_get(), timeout));
73
74 return -ETIMEDOUT;
75}
76
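/*
 * Read @size bytes starting from byte @address in USB4_DATA_DWORDS sized
 * blocks using the given @read_block callback. The address is rounded
 * down to a dword boundary and the extra @offset is skipped when copying
 * into @buf. A failing block read is retried (up to USB4_DATA_RETRIES
 * times in total), except for -ENODEV which is returned immediately.
 */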
77static int usb4_do_read_data(u16 address, void *buf, size_t size,
78 read_block_fn read_block, void *read_block_data)
79{
80 unsigned int retries = USB4_DATA_RETRIES;
81 unsigned int offset;
82
83 offset = address & 3;
84 address = address & ~3;
85
86 do {
87 size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
88 unsigned int dwaddress, dwords;
89 u8 data[USB4_DATA_DWORDS * 4];
90 int ret;
91
92 dwaddress = address / 4;
93 dwords = ALIGN(nbytes, 4) / 4;
94
 95 ret = read_block(read_block_data, dwaddress, data, dwords);
 96 if (ret) {
97 if (ret != -ENODEV && retries--)
98 continue;
99 return ret;
100 }
101
102 memcpy(buf, data + offset, nbytes);
103
104 size -= nbytes;
105 address += nbytes;
106 buf += nbytes;
107 } while (size > 0);
108
109 return 0;
110}
111
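/*
 * Counterpart of usb4_do_read_data(): writes @size bytes in
 * USB4_DATA_DWORDS sized blocks through @write_next_block. A block that
 * times out is retried (up to USB4_DATA_RETRIES times in total) and the
 * error is converted to -EIO once the retries are exhausted.
 */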
112static int usb4_do_write_data(unsigned int address, const void *buf, size_t size,
113 write_block_fn write_next_block, void *write_block_data)
114{
115 unsigned int retries = USB4_DATA_RETRIES;
116 unsigned int offset;
117
118 offset = address & 3;
119 address = address & ~3;
120
121 do {
122 u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
123 u8 data[USB4_DATA_DWORDS * 4];
124 int ret;
125
126 memcpy(data + offset, buf, nbytes);
127
 128 ret = write_next_block(write_block_data, data, nbytes / 4);
129 if (ret) {
130 if (ret == -ETIMEDOUT) {
131 if (retries--)
132 continue;
133 ret = -EIO;
134 }
135 return ret;
136 }
137
138 size -= nbytes;
139 address += nbytes;
140 buf += nbytes;
141 } while (size > 0);
142
143 return 0;
144}
145
146static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
147 u8 *status, const void *tx_data, size_t tx_dwords,
148 void *rx_data, size_t rx_dwords)
149{
150 u32 val;
151 int ret;
152
153 if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
154 return -EINVAL;
155
156 if (metadata) {
157 ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
158 if (ret)
159 return ret;
160 }
161 if (tx_dwords) {
162 ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
163 tx_dwords);
164 if (ret)
165 return ret;
166 }
 167
168 val = opcode | ROUTER_CS_26_OV;
169 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
170 if (ret)
171 return ret;
172
173 ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
174 if (ret)
175 return ret;
176
177 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
178 if (ret)
179 return ret;
180
181 if (val & ROUTER_CS_26_ONS)
182 return -EOPNOTSUPP;
183
184 if (status)
185 *status = (val & ROUTER_CS_26_STATUS_MASK) >>
186 ROUTER_CS_26_STATUS_SHIFT;
 187
188 if (metadata) {
189 ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
190 if (ret)
191 return ret;
192 }
193 if (rx_dwords) {
194 ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
195 rx_dwords);
196 if (ret)
197 return ret;
198 }
199
200 return 0;
201}
202
203static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
204 u32 *metadata, u8 *status)
205{
206 return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
207}
208
209static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
210 u32 *metadata, u8 *status,
211 const void *tx_data, size_t tx_dwords,
212 void *rx_data, size_t rx_dwords)
213{
214 return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
215 tx_dwords, rx_data, rx_dwords);
216}
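/*
 * All router operations above follow the same sequence: metadata (if
 * any) goes to ROUTER_CS_25 and TX data to ROUTER_CS_9, the opcode is
 * written to ROUTER_CS_26 with the OV bit set, and the operation has
 * completed once OV clears. The status field, updated metadata and any
 * RX data are then read back. A minimal sketch of a caller, mirroring
 * usb4_switch_nvm_sector_size() below:
 *
 *	u32 metadata;
 *	u8 status;
 *	int ret;
 *
 *	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
 *			     &status);
 *	if (!ret && status)
 *		ret = -EIO;
 */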
217
218static void usb4_switch_check_wakes(struct tb_switch *sw)
219{
220 struct tb_port *port;
221 bool wakeup = false;
222 u32 val;
223
224 if (!device_may_wakeup(&sw->dev))
225 return;
226
227 if (tb_route(sw)) {
228 if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
229 return;
230
231 tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
232 (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
233 (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
234
235 wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
236 }
237
238 /* Check for any connected downstream ports for USB4 wake */
239 tb_switch_for_each_port(sw, port) {
240 if (!tb_port_has_remote(port))
241 continue;
242
243 if (tb_port_read(port, &val, TB_CFG_PORT,
244 port->cap_usb4 + PORT_CS_18, 1))
245 break;
246
247 tb_port_dbg(port, "USB4 wake: %s\n",
248 (val & PORT_CS_18_WOU4S) ? "yes" : "no");
249
250 if (val & PORT_CS_18_WOU4S)
251 wakeup = true;
252 }
253
254 if (wakeup)
255 pm_wakeup_event(&sw->dev, 0);
256}
257
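/*
 * PORT_CS_18_TCM set means the link was established in TBT3
 * compatibility mode, so the link is treated as USB4 only when the bit
 * is clear.
 */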
258static bool link_is_usb4(struct tb_port *port)
259{
260 u32 val;
261
262 if (!port->cap_usb4)
263 return false;
264
265 if (tb_port_read(port, &val, TB_CFG_PORT,
266 port->cap_usb4 + PORT_CS_18, 1))
267 return false;
268
269 return !(val & PORT_CS_18_TCM);
270}
271
272/**
273 * usb4_switch_setup() - Additional setup for USB4 device
274 * @sw: USB4 router to setup
275 *
276 * USB4 routers need additional settings in order to enable all the
277 * tunneling. This function enables USB and PCIe tunneling if it can be
 278 * enabled (e.g. the parent router also supports them). If USB tunneling
 279 * is not available for some reason (for example there is a Thunderbolt 3
280 * switch upstream) then the internal xHCI controller is enabled
281 * instead.
282 */
283int usb4_switch_setup(struct tb_switch *sw)
284{
 285 struct tb_port *downstream_port;
286 struct tb_switch *parent;
287 bool tbt3, xhci;
288 u32 val = 0;
289 int ret;
290
291 usb4_switch_check_wakes(sw);
292
293 if (!tb_route(sw))
294 return 0;
295
296 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
297 if (ret)
298 return ret;
299
300 parent = tb_switch_parent(sw);
301 downstream_port = tb_port_at(tb_route(sw), parent);
302 sw->link_usb4 = link_is_usb4(downstream_port);
303 tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");
304
305 xhci = val & ROUTER_CS_6_HCI;
306 tbt3 = !(val & ROUTER_CS_6_TNS);
307
308 tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
309 tbt3 ? "yes" : "no", xhci ? "yes" : "no");
310
311 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
312 if (ret)
313 return ret;
314
 315 if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
316 val |= ROUTER_CS_5_UTO;
317 xhci = false;
318 }
319
320 /* Only enable PCIe tunneling if the parent router supports it */
321 if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
322 val |= ROUTER_CS_5_PTO;
 323 /*
 324 * xHCI can be enabled if PCIe tunneling is supported
 325 * and the parent does not have any USB3 downstream
 326 * adapters (so we cannot do USB 3.x tunneling).
 327 */
 328 if (xhci)
329 val |= ROUTER_CS_5_HCO;
330 }
331
332 /* TBT3 supported by the CM */
333 val |= ROUTER_CS_5_C3S;
334 /* Tunneling configuration is ready now */
335 val |= ROUTER_CS_5_CV;
336
337 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
338 if (ret)
339 return ret;
340
341 return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
342 ROUTER_CS_6_CR, 50);
343}
344
345/**
346 * usb4_switch_read_uid() - Read UID from USB4 router
347 * @sw: USB4 router
 348 * @uid: UID is stored here
349 *
350 * Reads 64-bit UID from USB4 router config space.
351 */
352int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
353{
354 return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
355}
356
 357static int usb4_switch_drom_read_block(void *data,
358 unsigned int dwaddress, void *buf,
359 size_t dwords)
360{
 361 struct tb_switch *sw = data;
362 u8 status = 0;
363 u32 metadata;
364 int ret;
365
366 metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
367 metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
368 USB4_DROM_ADDRESS_MASK;
369
370 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
371 &status, NULL, 0, buf, dwords);
372 if (ret)
373 return ret;
374
 375 return status ? -EIO : 0;
376}
377
378/**
379 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
380 * @sw: USB4 router
381 * @address: Byte address inside DROM to start reading
382 * @buf: Buffer where the DROM content is stored
383 * @size: Number of bytes to read from DROM
384 *
385 * Uses USB4 router operations to read router DROM. For devices this
386 * should always work but for hosts it may return %-EOPNOTSUPP in which
387 * case the host router does not have DROM.
388 */
389int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
390 size_t size)
391{
392 return usb4_do_read_data(address, buf, size,
393 usb4_switch_drom_read_block, sw);
394}
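/*
 * A minimal usage sketch (the buffer size is illustrative only; the
 * DROM parsing code typically reads the header first to learn the
 * total length and then fetches the rest):
 *
 *	u8 buf[64];
 *	int ret;
 *
 *	ret = usb4_switch_drom_read(sw, 0, buf, sizeof(buf));
 */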
395
396/**
397 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
398 * @sw: USB4 router
399 *
400 * Checks whether conditions are met so that lane bonding can be
401 * established with the upstream router. Call only for device routers.
402 */
403bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
404{
405 struct tb_port *up;
406 int ret;
407 u32 val;
408
409 up = tb_upstream_port(sw);
410 ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
411 if (ret)
412 return false;
413
414 return !!(val & PORT_CS_18_BE);
415}
416
 417/**
 418 * usb4_switch_set_wake() - Enable/disable wake
419 * @sw: USB4 router
420 * @flags: Wakeup flags (%0 to disable)
421 *
422 * Enables/disables router to wake up from sleep.
423 */
424int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
425{
426 struct tb_port *port;
427 u64 route = tb_route(sw);
428 u32 val;
429 int ret;
430
431 /*
432 * Enable wakes coming from all USB4 downstream ports (from
433 * child routers). For device routers do this also for the
434 * upstream USB4 port.
435 */
436 tb_switch_for_each_port(sw, port) {
437 if (!route && tb_is_upstream_port(port))
438 continue;
439
440 ret = tb_port_read(port, &val, TB_CFG_PORT,
441 port->cap_usb4 + PORT_CS_19, 1);
442 if (ret)
443 return ret;
444
445 val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
446
447 if (flags & TB_WAKE_ON_CONNECT)
448 val |= PORT_CS_19_WOC;
449 if (flags & TB_WAKE_ON_DISCONNECT)
450 val |= PORT_CS_19_WOD;
451 if (flags & TB_WAKE_ON_USB4)
452 val |= PORT_CS_19_WOU4;
453
454 ret = tb_port_write(port, &val, TB_CFG_PORT,
455 port->cap_usb4 + PORT_CS_19, 1);
456 if (ret)
457 return ret;
458 }
459
460 /*
461 * Enable wakes from PCIe and USB 3.x on this router. Only
462 * needed for device routers.
463 */
464 if (route) {
465 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
466 if (ret)
467 return ret;
468
469 val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
470 if (flags & TB_WAKE_ON_USB3)
471 val |= ROUTER_CS_5_WOU;
472 if (flags & TB_WAKE_ON_PCIE)
473 val |= ROUTER_CS_5_WOP;
474
475 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
476 if (ret)
477 return ret;
478 }
479
480 return 0;
481}
482
483/**
484 * usb4_switch_set_sleep() - Prepare the router to enter sleep
485 * @sw: USB4 router
486 *
487 * Sets sleep bit for the router. Returns when the router sleep ready
488 * bit has been asserted.
489 */
490int usb4_switch_set_sleep(struct tb_switch *sw)
491{
492 int ret;
493 u32 val;
494
495 /* Set sleep bit and wait for sleep ready to be asserted */
496 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
497 if (ret)
498 return ret;
499
500 val |= ROUTER_CS_5_SLP;
501
502 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
503 if (ret)
504 return ret;
505
506 return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
507 ROUTER_CS_6_SLPR, 500);
508}
509
510/**
511 * usb4_switch_nvm_sector_size() - Return router NVM sector size
512 * @sw: USB4 router
513 *
514 * If the router supports NVM operations this function returns the NVM
515 * sector size in bytes. If NVM operations are not supported returns
516 * %-EOPNOTSUPP.
517 */
518int usb4_switch_nvm_sector_size(struct tb_switch *sw)
519{
520 u32 metadata;
521 u8 status;
522 int ret;
523
524 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
525 &status);
526 if (ret)
527 return ret;
528
529 if (status)
530 return status == 0x2 ? -EOPNOTSUPP : -EIO;
531
532 return metadata & USB4_NVM_SECTOR_SIZE_MASK;
533}
534
 535static int usb4_switch_nvm_read_block(void *data,
536 unsigned int dwaddress, void *buf, size_t dwords)
537{
 538 struct tb_switch *sw = data;
539 u8 status = 0;
540 u32 metadata;
541 int ret;
542
543 metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
544 USB4_NVM_READ_LENGTH_MASK;
545 metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
546 USB4_NVM_READ_OFFSET_MASK;
547
548 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
549 &status, NULL, 0, buf, dwords);
550 if (ret)
551 return ret;
552
 553 return status ? -EIO : 0;
554}
555
556/**
557 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
558 * @sw: USB4 router
559 * @address: Starting address in bytes
560 * @buf: Read data is placed here
561 * @size: How many bytes to read
562 *
563 * Reads NVM contents of the router. If NVM is not supported returns
564 * %-EOPNOTSUPP.
565 */
566int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
567 size_t size)
568{
569 return usb4_do_read_data(address, buf, size,
570 usb4_switch_nvm_read_block, sw);
571}
572
573static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
574 unsigned int address)
575{
576 u32 metadata, dwaddress;
577 u8 status = 0;
578 int ret;
579
580 dwaddress = address / 4;
581 metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
582 USB4_NVM_SET_OFFSET_MASK;
583
584 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
585 &status);
586 if (ret)
587 return ret;
588
589 return status ? -EIO : 0;
590}
591
592static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
593 size_t dwords)
 594{
 595 struct tb_switch *sw = data;
596 u8 status;
597 int ret;
598
599 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
600 buf, dwords, NULL, 0);
601 if (ret)
602 return ret;
603
604 return status ? -EIO : 0;
605}
606
607/**
608 * usb4_switch_nvm_write() - Write to the router NVM
609 * @sw: USB4 router
610 * @address: Start address where to write in bytes
611 * @buf: Pointer to the data to write
612 * @size: Size of @buf in bytes
613 *
614 * Writes @buf to the router NVM using USB4 router operations. If NVM
615 * write is not supported returns %-EOPNOTSUPP.
616 */
617int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
618 const void *buf, size_t size)
619{
620 int ret;
621
622 ret = usb4_switch_nvm_set_offset(sw, address);
623 if (ret)
624 return ret;
625
626 return usb4_do_write_data(address, buf, size,
627 usb4_switch_nvm_write_next_block, sw);
628}
629
630/**
631 * usb4_switch_nvm_authenticate() - Authenticate new NVM
632 * @sw: USB4 router
633 *
634 * After the new NVM has been written via usb4_switch_nvm_write(), this
 635 * function triggers the NVM authentication process. The router gets power
 636 * cycled and if the authentication is successful the new NVM starts
 637 * running. In case of failure returns negative errno.
638 *
639 * The caller should call usb4_switch_nvm_authenticate_status() to read
640 * the status of the authentication after power cycle. It should be the
641 * first router operation to avoid the status being lost.
642 */
643int usb4_switch_nvm_authenticate(struct tb_switch *sw)
644{
645 int ret;
646
 647 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
648 switch (ret) {
649 /*
650 * The router is power cycled once NVM_AUTH is started so it is
651 * expected to get any of the following errors back.
652 */
653 case -EACCES:
654 case -ENOTCONN:
655 case -ETIMEDOUT:
656 return 0;
657
658 default:
659 return ret;
660 }
661}
662
663/**
664 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
665 * @sw: USB4 router
666 * @status: Status code of the operation
667 *
668 * The function checks if there is status available from the last NVM
669 * authenticate router operation. If there is status then %0 is returned
670 * and the status code is placed in @status. Returns negative errno in case
671 * of failure.
672 *
673 * Must be called before any other router operation.
674 */
675int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
676{
677 u16 opcode;
678 u32 val;
679 int ret;
680
681 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
682 if (ret)
683 return ret;
684
685 /* Check that the opcode is correct */
686 opcode = val & ROUTER_CS_26_OPCODE_MASK;
687 if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
688 if (val & ROUTER_CS_26_OV)
689 return -EBUSY;
690 if (val & ROUTER_CS_26_ONS)
691 return -EOPNOTSUPP;
692
693 *status = (val & ROUTER_CS_26_STATUS_MASK) >>
694 ROUTER_CS_26_STATUS_SHIFT;
695 } else {
696 *status = 0;
 697 }
698
699 return 0;
700}
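/*
 * Putting the NVM operations together, a router NVM upgrade roughly
 * looks like this (a sketch only, error handling omitted; @image and
 * @image_size come from the caller):
 *
 *	ret = usb4_switch_nvm_write(sw, 0, image, image_size);
 *	ret = usb4_switch_nvm_authenticate(sw);
 *	// ... the router power cycles here ...
 *	ret = usb4_switch_nvm_authenticate_status(sw, &status);
 *	// non-zero status means the authentication failed
 */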
701
702/**
703 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
704 * @sw: USB4 router
705 * @in: DP IN adapter
706 *
707 * For DP tunneling this function can be used to query availability of
708 * DP IN resource. Returns true if the resource is available for DP
709 * tunneling, false otherwise.
710 */
711bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
712{
 713 u32 metadata = in->port;
714 u8 status;
715 int ret;
716
717 ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
718 &status);
719 /*
720 * If DP resource allocation is not supported assume it is
721 * always available.
722 */
723 if (ret == -EOPNOTSUPP)
724 return true;
725 else if (ret)
726 return false;
727
728 return !status;
729}
730
731/**
732 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
733 * @sw: USB4 router
734 * @in: DP IN adapter
735 *
736 * Allocates DP IN resource for DP tunneling using USB4 router
737 * operations. If the resource was allocated returns %0. Otherwise
738 * returns negative errno, in particular %-EBUSY if the resource is
739 * already allocated.
740 */
741int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
742{
 743 u32 metadata = in->port;
744 u8 status;
745 int ret;
746
747 ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
748 &status);
749 if (ret == -EOPNOTSUPP)
750 return 0;
751 else if (ret)
752 return ret;
753
754 return status ? -EBUSY : 0;
755}
756
757/**
758 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
759 * @sw: USB4 router
760 * @in: DP IN adapter
761 *
762 * Releases the previously allocated DP IN resource.
763 */
764int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
765{
 766 u32 metadata = in->port;
767 u8 status;
768 int ret;
769
770 ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
771 &status);
772 if (ret == -EOPNOTSUPP)
773 return 0;
774 else if (ret)
775 return ret;
776
777 return status ? -EIO : 0;
778}
779
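/*
 * Return the zero-based index of @port among the downstream facing
 * lane 0 adapters of @sw. The protocol adapter mapping functions below
 * use this index to pick the matching PCIe/USB3 downstream adapter.
 */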
780static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
781{
782 struct tb_port *p;
783 int usb4_idx = 0;
784
785 /* Assume port is primary */
786 tb_switch_for_each_port(sw, p) {
787 if (!tb_port_is_null(p))
788 continue;
789 if (tb_is_upstream_port(p))
790 continue;
791 if (!p->link_nr) {
792 if (p == port)
793 break;
794 usb4_idx++;
795 }
796 }
797
798 return usb4_idx;
799}
800
801/**
802 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
803 * @sw: USB4 router
804 * @port: USB4 port
805 *
806 * USB4 routers have direct mapping between USB4 ports and PCIe
807 * downstream adapters where the PCIe topology is extended. This
808 * function returns the corresponding downstream PCIe adapter or %NULL
809 * if no such mapping was possible.
810 */
811struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
812 const struct tb_port *port)
813{
814 int usb4_idx = usb4_port_idx(sw, port);
815 struct tb_port *p;
816 int pcie_idx = 0;
817
818 /* Find PCIe down port matching usb4_port */
819 tb_switch_for_each_port(sw, p) {
820 if (!tb_port_is_pcie_down(p))
821 continue;
822
 823 if (pcie_idx == usb4_idx)
824 return p;
825
826 pcie_idx++;
827 }
828
829 return NULL;
830}
831
832/**
833 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
834 * @sw: USB4 router
835 * @port: USB4 port
836 *
837 * USB4 routers have direct mapping between USB4 ports and USB 3.x
838 * downstream adapters where the USB 3.x topology is extended. This
839 * function returns the corresponding downstream USB 3.x adapter or
840 * %NULL if no such mapping was possible.
841 */
842struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
843 const struct tb_port *port)
844{
845 int usb4_idx = usb4_port_idx(sw, port);
846 struct tb_port *p;
847 int usb_idx = 0;
848
849 /* Find USB3 down port matching usb4_port */
850 tb_switch_for_each_port(sw, p) {
851 if (!tb_port_is_usb3_down(p))
852 continue;
853
 854 if (usb_idx == usb4_idx)
855 return p;
856
857 usb_idx++;
858 }
859
860 return NULL;
861}
862
863/**
864 * usb4_port_unlock() - Unlock USB4 downstream port
865 * @port: USB4 port to unlock
866 *
867 * Unlocks USB4 downstream port so that the connection manager can
868 * access the router below this port.
869 */
870int usb4_port_unlock(struct tb_port *port)
871{
872 int ret;
873 u32 val;
874
875 ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
876 if (ret)
877 return ret;
878
879 val &= ~ADP_CS_4_LCK;
880 return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
881}
 882
883static int usb4_port_set_configured(struct tb_port *port, bool configured)
884{
885 int ret;
886 u32 val;
887
888 if (!port->cap_usb4)
889 return -EINVAL;
890
891 ret = tb_port_read(port, &val, TB_CFG_PORT,
892 port->cap_usb4 + PORT_CS_19, 1);
893 if (ret)
894 return ret;
895
896 if (configured)
897 val |= PORT_CS_19_PC;
898 else
899 val &= ~PORT_CS_19_PC;
900
901 return tb_port_write(port, &val, TB_CFG_PORT,
902 port->cap_usb4 + PORT_CS_19, 1);
903}
904
905/**
906 * usb4_port_configure() - Set USB4 port configured
 907 * @port: USB4 port to configure
908 *
909 * Sets the USB4 link to be configured for power management purposes.
910 */
911int usb4_port_configure(struct tb_port *port)
912{
913 return usb4_port_set_configured(port, true);
914}
915
916/**
917 * usb4_port_unconfigure() - Set USB4 port unconfigured
918 * @port: USB4 router
919 *
920 * Sets the USB4 link to be unconfigured for power management purposes.
921 */
922void usb4_port_unconfigure(struct tb_port *port)
923{
924 usb4_port_set_configured(port, false);
925}
926
927static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
928{
929 int ret;
930 u32 val;
931
932 if (!port->cap_usb4)
933 return -EINVAL;
934
935 ret = tb_port_read(port, &val, TB_CFG_PORT,
936 port->cap_usb4 + PORT_CS_19, 1);
937 if (ret)
938 return ret;
939
940 if (configured)
941 val |= PORT_CS_19_PID;
942 else
943 val &= ~PORT_CS_19_PID;
944
945 return tb_port_write(port, &val, TB_CFG_PORT,
946 port->cap_usb4 + PORT_CS_19, 1);
947}
948
949/**
950 * usb4_port_configure_xdomain() - Configure port for XDomain
951 * @port: USB4 port connected to another host
952 *
 953 * Marks the USB4 port as being connected to another host. Returns %0 in
 954 * case of success and negative errno in case of failure.
955 */
956int usb4_port_configure_xdomain(struct tb_port *port)
957{
958 return usb4_set_xdomain_configured(port, true);
959}
960
961/**
962 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
963 * @port: USB4 port that was connected to another host
964 *
965 * Clears USB4 port from being marked as XDomain.
966 */
967void usb4_port_unconfigure_xdomain(struct tb_port *port)
968{
969 usb4_set_xdomain_configured(port, false);
970}
971
972static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
973 u32 value, int timeout_msec)
974{
975 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
976
977 do {
978 u32 val;
979 int ret;
980
981 ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
982 if (ret)
983 return ret;
984
985 if ((val & bit) == value)
986 return 0;
987
988 usleep_range(50, 100);
989 } while (ktime_before(ktime_get(), timeout));
990
991 return -ETIMEDOUT;
992}
993
994static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
995{
996 if (dwords > USB4_DATA_DWORDS)
997 return -EINVAL;
998
999 return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
1000 dwords);
1001}
1002
1003static int usb4_port_write_data(struct tb_port *port, const void *data,
1004 size_t dwords)
1005{
1006 if (dwords > USB4_DATA_DWORDS)
1007 return -EINVAL;
1008
1009 return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
1010 dwords);
1011}
1012
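/*
 * Sideband register access: the register number, length, target (and
 * retimer index when the target is a retimer) are written to PORT_CS_1
 * together with the pending (PND) bit. Once PND clears, the NR bit
 * means nothing responded (-ENODEV) and RC means the access failed
 * (-EIO); the data itself is transferred through PORT_CS_2.
 */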
1013static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
1014 u8 index, u8 reg, void *buf, u8 size)
1015{
1016 size_t dwords = DIV_ROUND_UP(size, 4);
1017 int ret;
1018 u32 val;
1019
1020 if (!port->cap_usb4)
1021 return -EINVAL;
1022
1023 val = reg;
1024 val |= size << PORT_CS_1_LENGTH_SHIFT;
1025 val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
1026 if (target == USB4_SB_TARGET_RETIMER)
1027 val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
1028 val |= PORT_CS_1_PND;
1029
1030 ret = tb_port_write(port, &val, TB_CFG_PORT,
1031 port->cap_usb4 + PORT_CS_1, 1);
1032 if (ret)
1033 return ret;
1034
1035 ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
1036 PORT_CS_1_PND, 0, 500);
1037 if (ret)
1038 return ret;
1039
1040 ret = tb_port_read(port, &val, TB_CFG_PORT,
1041 port->cap_usb4 + PORT_CS_1, 1);
1042 if (ret)
1043 return ret;
1044
1045 if (val & PORT_CS_1_NR)
1046 return -ENODEV;
1047 if (val & PORT_CS_1_RC)
1048 return -EIO;
1049
1050 return buf ? usb4_port_read_data(port, buf, dwords) : 0;
1051}
1052
1053static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
1054 u8 index, u8 reg, const void *buf, u8 size)
1055{
1056 size_t dwords = DIV_ROUND_UP(size, 4);
1057 int ret;
1058 u32 val;
1059
1060 if (!port->cap_usb4)
1061 return -EINVAL;
1062
1063 if (buf) {
1064 ret = usb4_port_write_data(port, buf, dwords);
1065 if (ret)
1066 return ret;
1067 }
1068
1069 val = reg;
1070 val |= size << PORT_CS_1_LENGTH_SHIFT;
1071 val |= PORT_CS_1_WNR_WRITE;
1072 val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
1073 if (target == USB4_SB_TARGET_RETIMER)
1074 val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
1075 val |= PORT_CS_1_PND;
1076
1077 ret = tb_port_write(port, &val, TB_CFG_PORT,
1078 port->cap_usb4 + PORT_CS_1, 1);
1079 if (ret)
1080 return ret;
1081
1082 ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
1083 PORT_CS_1_PND, 0, 500);
1084 if (ret)
1085 return ret;
1086
1087 ret = tb_port_read(port, &val, TB_CFG_PORT,
1088 port->cap_usb4 + PORT_CS_1, 1);
1089 if (ret)
1090 return ret;
1091
1092 if (val & PORT_CS_1_NR)
1093 return -ENODEV;
1094 if (val & PORT_CS_1_RC)
1095 return -EIO;
1096
1097 return 0;
1098}
1099
1100static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
1101 u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
1102{
1103 ktime_t timeout;
1104 u32 val;
1105 int ret;
1106
1107 val = opcode;
1108 ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
1109 sizeof(val));
1110 if (ret)
1111 return ret;
1112
1113 timeout = ktime_add_ms(ktime_get(), timeout_msec);
1114
1115 do {
1116 /* Check results */
1117 ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
1118 &val, sizeof(val));
1119 if (ret)
1120 return ret;
1121
1122 switch (val) {
1123 case 0:
1124 return 0;
1125
1126 case USB4_SB_OPCODE_ERR:
1127 return -EAGAIN;
1128
1129 case USB4_SB_OPCODE_ONS:
1130 return -EOPNOTSUPP;
1131
1132 default:
1133 if (val != opcode)
1134 return -EIO;
1135 break;
1136 }
1137 } while (ktime_before(ktime_get(), timeout));
1138
1139 return -ETIMEDOUT;
1140}
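/*
 * Sideband operations write the opcode to USB4_SB_OPCODE and then poll
 * the same register: it reads back as 0 on success, USB4_SB_OPCODE_ERR
 * on error (-EAGAIN) and USB4_SB_OPCODE_ONS when the target does not
 * support the operation (-EOPNOTSUPP).
 */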
1141
1142/**
1143 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
1144 * @port: USB4 port
1145 *
 1146 * This forces the USB4 port to send a broadcast RT transaction which
 1147 * makes the retimers on the link assign an index to themselves. Returns
1148 * %0 in case of success and negative errno if there was an error.
1149 */
1150int usb4_port_enumerate_retimers(struct tb_port *port)
1151{
1152 u32 val;
1153
1154 val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
1155 return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1156 USB4_SB_OPCODE, &val, sizeof(val));
1157}
1158
1159static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
1160 enum usb4_sb_opcode opcode,
1161 int timeout_msec)
1162{
1163 return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
1164 timeout_msec);
1165}
1166
1167/**
1168 * usb4_port_retimer_read() - Read from retimer sideband registers
1169 * @port: USB4 port
1170 * @index: Retimer index
1171 * @reg: Sideband register to read
1172 * @buf: Data from @reg is stored here
1173 * @size: Number of bytes to read
1174 *
1175 * Function reads retimer sideband registers starting from @reg. The
1176 * retimer is connected to @port at @index. Returns %0 in case of
1177 * success, and read data is copied to @buf. If there is no retimer
1178 * present at given @index returns %-ENODEV. In any other failure
1179 * returns negative errno.
1180 */
1181int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
1182 u8 size)
1183{
1184 return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
1185 size);
1186}
1187
1188/**
1189 * usb4_port_retimer_write() - Write to retimer sideband registers
1190 * @port: USB4 port
1191 * @index: Retimer index
1192 * @reg: Sideband register to write
1193 * @buf: Data that is written starting from @reg
1194 * @size: Number of bytes to write
1195 *
1196 * Writes retimer sideband registers starting from @reg. The retimer is
1197 * connected to @port at @index. Returns %0 in case of success. If there
1198 * is no retimer present at given @index returns %-ENODEV. In any other
1199 * failure returns negative errno.
1200 */
1201int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
1202 const void *buf, u8 size)
1203{
1204 return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
1205 size);
1206}
1207
1208/**
1209 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
1210 * @port: USB4 port
1211 * @index: Retimer index
1212 *
 1213 * If the retimer at @index is the last one (connected directly to the
 1214 * Type-C port) this function returns %1. If it is not, returns %0. If
1215 * the retimer is not present returns %-ENODEV. Otherwise returns
1216 * negative errno.
1217 */
1218int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
1219{
1220 u32 metadata;
1221 int ret;
1222
1223 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
1224 500);
1225 if (ret)
1226 return ret;
1227
1228 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1229 sizeof(metadata));
1230 return ret ? ret : metadata & 1;
1231}
1232
1233/**
1234 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
1235 * @port: USB4 port
1236 * @index: Retimer index
1237 *
1238 * Reads NVM sector size (in bytes) of a retimer at @index. This
1239 * operation can be used to determine whether the retimer supports NVM
1240 * upgrade for example. Returns sector size in bytes or negative errno
1241 * in case of error. Specifically returns %-ENODEV if there is no
1242 * retimer at @index.
1243 */
1244int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
1245{
1246 u32 metadata;
1247 int ret;
1248
1249 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
1250 500);
1251 if (ret)
1252 return ret;
1253
1254 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1255 sizeof(metadata));
1256 return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
1257}
1258
1259static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
1260 unsigned int address)
1261{
1262 u32 metadata, dwaddress;
1263 int ret;
1264
1265 dwaddress = address / 4;
1266 metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
1267 USB4_NVM_SET_OFFSET_MASK;
1268
1269 ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1270 sizeof(metadata));
1271 if (ret)
1272 return ret;
1273
1274 return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
1275 500);
1276}
1277
1278struct retimer_info {
1279 struct tb_port *port;
1280 u8 index;
1281};
1282
1283static int usb4_port_retimer_nvm_write_next_block(void *data, const void *buf,
1284 size_t dwords)
1285
1286{
1287 const struct retimer_info *info = data;
1288 struct tb_port *port = info->port;
1289 u8 index = info->index;
1290 int ret;
1291
1292 ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
1293 buf, dwords * 4);
1294 if (ret)
1295 return ret;
1296
1297 return usb4_port_retimer_op(port, index,
1298 USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
1299}
1300
1301/**
1302 * usb4_port_retimer_nvm_write() - Write to retimer NVM
1303 * @port: USB4 port
1304 * @index: Retimer index
1305 * @address: Byte address where to start the write
1306 * @buf: Data to write
1307 * @size: Size in bytes how much to write
1308 *
1309 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
1310 * upgrade. Returns %0 if the data was written successfully and negative
1311 * errno in case of failure. Specifically returns %-ENODEV if there is
1312 * no retimer at @index.
1313 */
1314int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
1315 const void *buf, size_t size)
1316{
1317 struct retimer_info info = { .port = port, .index = index };
1318 int ret;
1319
1320 ret = usb4_port_retimer_nvm_set_offset(port, index, address);
1321 if (ret)
1322 return ret;
1323
1324 return usb4_do_write_data(address, buf, size,
1325 usb4_port_retimer_nvm_write_next_block, &info);
1326}
1327
1328/**
1329 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
1330 * @port: USB4 port
1331 * @index: Retimer index
1332 *
1333 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
1334 * this function can be used to trigger the NVM upgrade process. If
1335 * successful the retimer restarts with the new NVM and may not have the
1336 * index set so one needs to call usb4_port_enumerate_retimers() to
1337 * force index to be assigned.
1338 */
1339int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
1340{
1341 u32 val;
1342
1343 /*
1344 * We need to use the raw operation here because once the
1345 * authentication completes the retimer index is not set anymore
1346 * so we do not get back the status now.
1347 */
1348 val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
1349 return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
1350 USB4_SB_OPCODE, &val, sizeof(val));
1351}
1352
1353/**
1354 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
1355 * @port: USB4 port
1356 * @index: Retimer index
1357 * @status: Raw status code read from metadata
1358 *
1359 * This can be called after usb4_port_retimer_nvm_authenticate() and
1360 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
1361 *
1362 * Returns %0 if the authentication status was successfully read. The
1363 * completion metadata (the result) is then stored into @status. If
1364 * reading the status fails, returns negative errno.
1365 */
1366int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
1367 u32 *status)
1368{
1369 u32 metadata, val;
1370 int ret;
1371
1372 ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
1373 sizeof(val));
1374 if (ret)
1375 return ret;
1376
1377 switch (val) {
1378 case 0:
1379 *status = 0;
1380 return 0;
1381
1382 case USB4_SB_OPCODE_ERR:
1383 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
1384 &metadata, sizeof(metadata));
1385 if (ret)
1386 return ret;
1387
1388 *status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
1389 return 0;
1390
1391 case USB4_SB_OPCODE_ONS:
1392 return -EOPNOTSUPP;
1393
1394 default:
1395 return -EIO;
1396 }
1397}
1398
1399static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
1400 void *buf, size_t dwords)
1401{
1402 const struct retimer_info *info = data;
1403 struct tb_port *port = info->port;
1404 u8 index = info->index;
1405 u32 metadata;
1406 int ret;
1407
1408 metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
1409 if (dwords < USB4_DATA_DWORDS)
1410 metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
1411
1412 ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1413 sizeof(metadata));
1414 if (ret)
1415 return ret;
1416
1417 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
1418 if (ret)
1419 return ret;
1420
1421 return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
1422 dwords * 4);
1423}
1424
1425/**
1426 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
1427 * @port: USB4 port
1428 * @index: Retimer index
1429 * @address: NVM address (in bytes) to start reading
1430 * @buf: Data read from NVM is stored here
1431 * @size: Number of bytes to read
1432 *
1433 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
1434 * read was successful and negative errno in case of failure.
1435 * Specifically returns %-ENODEV if there is no retimer at @index.
1436 */
1437int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
1438 unsigned int address, void *buf, size_t size)
1439{
1440 struct retimer_info info = { .port = port, .index = index };
1441
1442 return usb4_do_read_data(address, buf, size,
1443 usb4_port_retimer_nvm_read_block, &info);
1444}
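/*
 * As with router NVM, a retimer NVM upgrade is a sequence of the calls
 * above, roughly (a sketch only, error handling omitted; @image and
 * @image_size come from the caller):
 *
 *	usb4_port_enumerate_retimers(port);
 *	usb4_port_retimer_nvm_write(port, index, 0, image, image_size);
 *	usb4_port_retimer_nvm_authenticate(port, index);
 *	// the retimer restarts with the new NVM and may lose its index
 *	usb4_port_enumerate_retimers(port);
 *	usb4_port_retimer_nvm_authenticate_status(port, index, &status);
 */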
1445
1446/**
 1447 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
1448 * @port: USB3 adapter port
1449 *
1450 * Return maximum supported link rate of a USB3 adapter in Mb/s.
1451 * Negative errno in case of error.
1452 */
1453int usb4_usb3_port_max_link_rate(struct tb_port *port)
1454{
1455 int ret, lr;
1456 u32 val;
1457
1458 if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1459 return -EINVAL;
1460
1461 ret = tb_port_read(port, &val, TB_CFG_PORT,
1462 port->cap_adap + ADP_USB3_CS_4, 1);
1463 if (ret)
1464 return ret;
1465
1466 lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
1467 return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
1468}
1469
1470/**
1471 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
1472 * @port: USB3 adapter port
1473 *
1474 * Return actual established link rate of a USB3 adapter in Mb/s. If the
1475 * link is not up returns %0 and negative errno in case of failure.
1476 */
1477int usb4_usb3_port_actual_link_rate(struct tb_port *port)
1478{
1479 int ret, lr;
1480 u32 val;
1481
1482 if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1483 return -EINVAL;
1484
1485 ret = tb_port_read(port, &val, TB_CFG_PORT,
1486 port->cap_adap + ADP_USB3_CS_4, 1);
1487 if (ret)
1488 return ret;
1489
1490 if (!(val & ADP_USB3_CS_4_ULV))
1491 return 0;
1492
1493 lr = val & ADP_USB3_CS_4_ALR_MASK;
1494 return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
1495}
1496
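/*
 * The CM signals that it wants to touch the bandwidth allocation fields
 * by setting CMR in ADP_USB3_CS_2 and waiting for the adapter to mirror
 * it in the HCA bit of ADP_USB3_CS_1. The request is dropped again once
 * the allocation registers have been read or written.
 */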
1497static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
1498{
1499 int ret;
1500 u32 val;
1501
1502 if (!tb_port_is_usb3_down(port))
1503 return -EINVAL;
1504 if (tb_route(port->sw))
1505 return -EINVAL;
1506
1507 ret = tb_port_read(port, &val, TB_CFG_PORT,
1508 port->cap_adap + ADP_USB3_CS_2, 1);
1509 if (ret)
1510 return ret;
1511
1512 if (request)
1513 val |= ADP_USB3_CS_2_CMR;
1514 else
1515 val &= ~ADP_USB3_CS_2_CMR;
1516
1517 ret = tb_port_write(port, &val, TB_CFG_PORT,
1518 port->cap_adap + ADP_USB3_CS_2, 1);
1519 if (ret)
1520 return ret;
1521
1522 /*
1523 * We can use val here directly as the CMR bit is in the same place
1524 * as HCA. Just mask out others.
1525 */
1526 val &= ADP_USB3_CS_2_CMR;
1527 return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
1528 ADP_USB3_CS_1_HCA, val, 1500);
1529}
1530
1531static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
1532{
1533 return usb4_usb3_port_cm_request(port, true);
1534}
1535
1536static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
1537{
1538 return usb4_usb3_port_cm_request(port, false);
1539}
1540
1541static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
1542{
1543 unsigned long uframes;
1544
 1545 uframes = bw * 512UL << scale;
1546 return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
1547}
1548
1549static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
1550{
1551 unsigned long uframes;
1552
1553 /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
1554 uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
 1555 return DIV_ROUND_UP(uframes, 512UL << scale);
1556}
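/*
 * The bandwidth fields count 512 byte (shifted left by the scale)
 * blocks per 125 us microframe. For example with scale 0 a field value
 * of 244 is 244 * 512 bytes per microframe, which the helpers above
 * convert to DIV_ROUND_CLOSEST(124928 * 8000, 1000 * 1000) = 999 Mb/s,
 * and 1000 Mb/s converts back to DIV_ROUND_UP(125000, 512) = 245.
 */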
1557
1558static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
1559 int *upstream_bw,
1560 int *downstream_bw)
1561{
1562 u32 val, bw, scale;
1563 int ret;
1564
1565 ret = tb_port_read(port, &val, TB_CFG_PORT,
1566 port->cap_adap + ADP_USB3_CS_2, 1);
1567 if (ret)
1568 return ret;
1569
1570 ret = tb_port_read(port, &scale, TB_CFG_PORT,
1571 port->cap_adap + ADP_USB3_CS_3, 1);
1572 if (ret)
1573 return ret;
1574
1575 scale &= ADP_USB3_CS_3_SCALE_MASK;
1576
1577 bw = val & ADP_USB3_CS_2_AUBW_MASK;
1578 *upstream_bw = usb3_bw_to_mbps(bw, scale);
1579
1580 bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
1581 *downstream_bw = usb3_bw_to_mbps(bw, scale);
1582
1583 return 0;
1584}
1585
1586/**
1587 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
1588 * @port: USB3 adapter port
1589 * @upstream_bw: Allocated upstream bandwidth is stored here
1590 * @downstream_bw: Allocated downstream bandwidth is stored here
1591 *
1592 * Stores currently allocated USB3 bandwidth into @upstream_bw and
1593 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
1594 * errno in failure.
1595 */
1596int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
1597 int *downstream_bw)
1598{
1599 int ret;
1600
1601 ret = usb4_usb3_port_set_cm_request(port);
1602 if (ret)
1603 return ret;
1604
1605 ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
1606 downstream_bw);
1607 usb4_usb3_port_clear_cm_request(port);
1608
1609 return ret;
1610}
1611
1612static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
1613 int *upstream_bw,
1614 int *downstream_bw)
1615{
1616 u32 val, bw, scale;
1617 int ret;
1618
1619 ret = tb_port_read(port, &val, TB_CFG_PORT,
1620 port->cap_adap + ADP_USB3_CS_1, 1);
1621 if (ret)
1622 return ret;
1623
1624 ret = tb_port_read(port, &scale, TB_CFG_PORT,
1625 port->cap_adap + ADP_USB3_CS_3, 1);
1626 if (ret)
1627 return ret;
1628
1629 scale &= ADP_USB3_CS_3_SCALE_MASK;
1630
1631 bw = val & ADP_USB3_CS_1_CUBW_MASK;
1632 *upstream_bw = usb3_bw_to_mbps(bw, scale);
1633
1634 bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
1635 *downstream_bw = usb3_bw_to_mbps(bw, scale);
1636
1637 return 0;
1638}
1639
1640static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
1641 int upstream_bw,
1642 int downstream_bw)
1643{
1644 u32 val, ubw, dbw, scale;
1645 int ret;
1646
1647 /* Read the used scale, hardware default is 0 */
1648 ret = tb_port_read(port, &scale, TB_CFG_PORT,
1649 port->cap_adap + ADP_USB3_CS_3, 1);
1650 if (ret)
1651 return ret;
1652
1653 scale &= ADP_USB3_CS_3_SCALE_MASK;
1654 ubw = mbps_to_usb3_bw(upstream_bw, scale);
1655 dbw = mbps_to_usb3_bw(downstream_bw, scale);
1656
1657 ret = tb_port_read(port, &val, TB_CFG_PORT,
1658 port->cap_adap + ADP_USB3_CS_2, 1);
1659 if (ret)
1660 return ret;
1661
1662 val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
1663 val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
1664 val |= ubw;
1665
1666 return tb_port_write(port, &val, TB_CFG_PORT,
1667 port->cap_adap + ADP_USB3_CS_2, 1);
1668}
1669
1670/**
1671 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
1672 * @port: USB3 adapter port
1673 * @upstream_bw: New upstream bandwidth
1674 * @downstream_bw: New downstream bandwidth
1675 *
1676 * This can be used to set how much bandwidth is allocated for the USB3
1677 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
1678 * new values programmed to the USB3 adapter allocation registers. If
1679 * the values are lower than what is currently consumed the allocation
1680 * is set to what is currently consumed instead (consumed bandwidth
1681 * cannot be taken away by CM). The actual new values are returned in
1682 * @upstream_bw and @downstream_bw.
1683 *
1684 * Returns %0 in case of success and negative errno if there was a
1685 * failure.
1686 */
1687int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
1688 int *downstream_bw)
1689{
1690 int ret, consumed_up, consumed_down, allocate_up, allocate_down;
1691
1692 ret = usb4_usb3_port_set_cm_request(port);
1693 if (ret)
1694 return ret;
1695
1696 ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
1697 &consumed_down);
1698 if (ret)
1699 goto err_request;
1700
 1701 /* Don't allow it to go lower than what is consumed */
1702 allocate_up = max(*upstream_bw, consumed_up);
1703 allocate_down = max(*downstream_bw, consumed_down);
1704
1705 ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
1706 allocate_down);
1707 if (ret)
1708 goto err_request;
1709
1710 *upstream_bw = allocate_up;
1711 *downstream_bw = allocate_down;
1712
1713err_request:
1714 usb4_usb3_port_clear_cm_request(port);
1715 return ret;
1716}
1717
1718/**
1719 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
1720 * @port: USB3 adapter port
1721 * @upstream_bw: New allocated upstream bandwidth
1722 * @downstream_bw: New allocated downstream bandwidth
1723 *
1724 * Releases USB3 allocated bandwidth down to what is actually consumed.
1725 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
1726 *
 1727 * Returns %0 in case of success and negative errno in case of failure.
1728 */
1729int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
1730 int *downstream_bw)
1731{
1732 int ret, consumed_up, consumed_down;
1733
1734 ret = usb4_usb3_port_set_cm_request(port);
1735 if (ret)
1736 return ret;
1737
1738 ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
1739 &consumed_down);
1740 if (ret)
1741 goto err_request;
1742
1743 /*
1744 * Always keep 1000 Mb/s to make sure xHCI has at least some
1745 * bandwidth available for isochronous traffic.
1746 */
1747 if (consumed_up < 1000)
1748 consumed_up = 1000;
1749 if (consumed_down < 1000)
1750 consumed_down = 1000;
1751
1752 ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
1753 consumed_down);
1754 if (ret)
1755 goto err_request;
1756
1757 *upstream_bw = consumed_up;
1758 *downstream_bw = consumed_down;
1759
1760err_request:
1761 usb4_usb3_port_clear_cm_request(port);
1762 return ret;
1763}