Merge tag 'powerpc-6.10-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
[linux-block.git] / drivers / thunderbolt / usb4.c
CommitLineData
b0407983
MW
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * USB4 specific functionality
4 *
5 * Copyright (C) 2019, Intel Corporation
6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7 * Rajmohan Mani <rajmohan.mani@intel.com>
8 */
9
10#include <linux/delay.h>
11#include <linux/ktime.h>
5d883668 12#include <linux/units.h>
b0407983 13
02d12855 14#include "sb_regs.h"
b0407983
MW
15#include "tb.h"
16
b0407983 17#define USB4_DATA_RETRIES 3
7c81a578 18#define USB4_DATA_DWORDS 16
b0407983 19
02d12855
RM
20enum usb4_sb_target {
21 USB4_SB_TARGET_ROUTER,
22 USB4_SB_TARGET_PARTNER,
23 USB4_SB_TARGET_RETIMER,
24};
25
b0407983
MW
26#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
27#define USB4_NVM_READ_OFFSET_SHIFT 2
28#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
29#define USB4_NVM_READ_LENGTH_SHIFT 24
30
31#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
32#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
33
34#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
35#define USB4_DROM_ADDRESS_SHIFT 2
36#define USB4_DROM_SIZE_MASK GENMASK(19, 15)
37#define USB4_DROM_SIZE_SHIFT 15
38
39#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
40
56ad3aef
MW
41#define USB4_BA_LENGTH_MASK GENMASK(7, 0)
42#define USB4_BA_INDEX_MASK GENMASK(15, 0)
43
44enum usb4_ba_index {
45 USB4_BA_MAX_USB3 = 0x1,
46 USB4_BA_MIN_DP_AUX = 0x2,
47 USB4_BA_MIN_DP_MAIN = 0x3,
48 USB4_BA_MAX_PCIE = 0x4,
49 USB4_BA_MAX_HI = 0x5,
50};
51
52#define USB4_BA_VALUE_MASK GENMASK(31, 16)
53#define USB4_BA_VALUE_SHIFT 16
54
c6ca1ac9
MW
55/* Delays in us used with usb4_port_wait_for_bit() */
56#define USB4_PORT_DELAY 50
57#define USB4_PORT_SB_DELAY 5000
58
9490f711
MW
/*
 * Runs a USB4 router operation natively through the router config
 * space. Writes metadata and input data, triggers the operation,
 * polls for completion and reads back metadata/output data. Returns
 * %0 on success, %-EOPNOTSUPP if the router does not recognize the
 * opcode, and other negative errno on register access failures.
 */
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status,
				 const void *tx_data, size_t tx_dwords,
				 void *rx_data, size_t rx_dwords)
{
	u32 val;
	int ret;

	/* Operation metadata goes to ROUTER_CS_25 before triggering */
	if (metadata) {
		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	/* Input payload goes to the data dwords starting at ROUTER_CS_9 */
	if (tx_dwords) {
		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				  tx_dwords);
		if (ret)
			return ret;
	}

	/* Writing the opcode with OV set starts the operation */
	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* The router clears OV when the operation has completed */
	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* ONS set means the opcode is not supported by this router */
	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;

	/* Read back metadata and output payload produced by the operation */
	if (metadata) {
		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (rx_dwords) {
		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				 rx_dwords);
		if (ret)
			return ret;
	}

	return 0;
}
113
9490f711
MW
114static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
115 u8 *status, const void *tx_data, size_t tx_dwords,
116 void *rx_data, size_t rx_dwords)
117{
118 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
119
7c81a578 120 if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
9490f711
MW
121 return -EINVAL;
122
123 /*
124 * If the connection manager implementation provides USB4 router
125 * operation proxy callback, call it here instead of running the
126 * operation natively.
127 */
128 if (cm_ops->usb4_switch_op) {
129 int ret;
130
131 ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
132 tx_data, tx_dwords, rx_data,
133 rx_dwords);
134 if (ret != -EOPNOTSUPP)
135 return ret;
136
137 /*
138 * If the proxy was not supported then run the native
139 * router operation instead.
140 */
141 }
142
143 return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
144 tx_dwords, rx_data, rx_dwords);
145}
146
83bab44a
MW
/* Runs a router operation that carries no data payload */
static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status)
{
	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}
152
/* Runs a router operation with input and/or output data payload */
static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_dwords,
				      void *rx_data, size_t rx_dwords)
{
	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
				tx_dwords, rx_data, rx_dwords);
}
161
dcd12aca
GF
/**
 * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
 * @sw: Router whose wakes to check
 *
 * Checks wakes occurred during suspend and notify the PM core about them.
 */
void usb4_switch_check_wakes(struct tb_switch *sw)
{
	bool wakeup_usb4 = false;
	struct usb4_port *usb4;
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	/* Device routers report PCIe/USB3 wake status in ROUTER_CS_6 */
	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/*
	 * Check for any downstream ports for USB4 wake,
	 * connection wake and disconnection wake.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!port->cap_usb4)
			continue;

		/* Per-port wake status lives in PORT_CS_18 */
		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no",
			    (val & PORT_CS_18_WOCS) ? "yes" : "no",
			    (val & PORT_CS_18_WODS) ? "yes" : "no");

		wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
				     PORT_CS_18_WODS);

		/* Report the wake on the port device if it may wake up */
		usb4 = port->usb4;
		if (device_may_wakeup(&usb4->dev) && wakeup_usb4)
			pm_wakeup_event(&usb4->dev, 0);

		wakeup |= wakeup_usb4;
	}

	/* Any wake source also reports a wake event on the router itself */
	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}
217
bbcf40b3
MW
218static bool link_is_usb4(struct tb_port *port)
219{
220 u32 val;
221
222 if (!port->cap_usb4)
223 return false;
224
225 if (tb_port_read(port, &val, TB_CFG_PORT,
226 port->cap_usb4 + PORT_CS_18, 1))
227 return false;
228
229 return !(val & PORT_CS_18_TCM);
230}
231
b0407983
MW
/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g the parent switch also supports them). If USB tunneling
 * is not available for some reason (like that there is Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 *
 * This does not set the configuration valid bit of the router. To do
 * that call usb4_switch_configuration_valid().
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	/* Nothing to do for the host router */
	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	down = tb_switch_downstream_port(sw);
	sw->link_usb4 = link_is_usb4(down);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/*
	 * Enable USB 3.x tunneling only over a USB4 link and when the
	 * parent has a USB3 downstream adapter; tunneling makes the
	 * internal xHCI unnecessary.
	 */
	if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
	    tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/*
	 * Only enable PCIe tunneling if the parent router supports it
	 * and it is not disabled.
	 */
	if (tb_acpi_may_tunnel_pcie() &&
	    tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 dowstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val &= ~ROUTER_CS_5_CNS;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}
302
/**
 * usb4_switch_configuration_valid() - Set tunneling configuration to be valid
 * @sw: USB4 router
 *
 * Sets configuration valid bit for the router. Must be called before
 * any tunnels can be set through the router and after
 * usb4_switch_setup() has been called. Can be called to host and device
 * routers (does nothing for the latter).
 *
 * Returns %0 in success and negative errno otherwise.
 */
int usb4_switch_configuration_valid(struct tb_switch *sw)
{
	u32 val;
	int ret;

	/* Only device routers have the CV/CR handshake */
	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Set Configuration Valid ... */
	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* ... and wait for the router to acknowledge with Config Ready */
	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
				      ROUTER_CS_6_CR, 50);
}
335
/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	/* UID occupies two dwords starting at ROUTER_CS_7 */
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}
347
7e72846b 348static int usb4_switch_drom_read_block(void *data,
b0407983
MW
349 unsigned int dwaddress, void *buf,
350 size_t dwords)
351{
7e72846b 352 struct tb_switch *sw = data;
b0407983
MW
353 u8 status = 0;
354 u32 metadata;
355 int ret;
356
357 metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
358 metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
359 USB4_DROM_ADDRESS_MASK;
360
83bab44a
MW
361 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
362 &status, NULL, 0, buf, dwords);
b0407983
MW
363 if (ret)
364 return ret;
365
83bab44a 366 return status ? -EIO : 0;
b0407983
MW
367}
368
/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read router DROM. For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	/* tb_nvm_read_data() handles dword alignment and retries */
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_drom_read_block, sw);
}
386
b0407983
MW
/**
 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
 * @sw: USB4 router
 *
 * Checks whether conditions are met so that lane bonding can be
 * established with the upstream router. Call only for device routers.
 */
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int ret;
	u32 val;

	up = tb_upstream_port(sw);
	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	/* Bonding Enabled (BE) bit of the upstream port */
	return !!(val & PORT_CS_18_BE);
}
407
b2911a59
MW
/**
 * usb4_switch_set_wake() - Enabled/disable wake
 * @sw: USB4 router
 * @flags: Wakeup flags (%0 to disable)
 *
 * Enables/disables router to wake up from sleep.
 */
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	struct usb4_port *usb4;
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;
		/* Host router (route == 0) skips its upstream port */
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		/* Clear all wake enables before setting the requested ones */
		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (tb_is_upstream_port(port)) {
			val |= PORT_CS_19_WOU4;
		} else {
			/* PC tells whether something is connected downstream */
			bool configured = val & PORT_CS_19_PC;
			usb4 = port->usb4;

			/* Wake on connect only makes sense on a free port */
			if (((flags & TB_WAKE_ON_CONNECT) |
			    device_may_wakeup(&usb4->dev)) && !configured)
				val |= PORT_CS_19_WOC;
			/* Wake on disconnect only on a connected port */
			if (((flags & TB_WAKE_ON_DISCONNECT) |
			    device_may_wakeup(&usb4->dev)) && configured)
				val |= PORT_CS_19_WOD;
			if ((flags & TB_WAKE_ON_USB4) && configured)
				val |= PORT_CS_19_WOU4;
		}

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;
		if (flags & TB_WAKE_ON_DP)
			val |= ROUTER_CS_5_WOD;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}
489
b0407983
MW
/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Sets sleep bit for the router. Returns when the router sleep ready
 * bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Router signals readiness via SLPR in ROUTER_CS_6 */
	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
				      ROUTER_CS_6_SLPR, 500);
}
516
/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
			     &status);
	if (ret)
		return ret;

	/* Status 0x2 is mapped to "NVM operations not supported" */
	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	/* Sector size is returned in the operation metadata */
	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
541
7e72846b 542static int usb4_switch_nvm_read_block(void *data,
b0407983
MW
543 unsigned int dwaddress, void *buf, size_t dwords)
544{
7e72846b 545 struct tb_switch *sw = data;
b0407983
MW
546 u8 status = 0;
547 u32 metadata;
548 int ret;
549
550 metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
551 USB4_NVM_READ_LENGTH_MASK;
552 metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
553 USB4_NVM_READ_OFFSET_MASK;
554
83bab44a
MW
555 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
556 &status, NULL, 0, buf, dwords);
b0407983
MW
557 if (ret)
558 return ret;
559
83bab44a 560 return status ? -EIO : 0;
b0407983
MW
561}
562
/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	/* tb_nvm_read_data() handles dword alignment and retries */
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_nvm_read_block, sw);
}
579
1cbf680f
MW
580/**
581 * usb4_switch_nvm_set_offset() - Set NVM write offset
582 * @sw: USB4 router
583 * @address: Start offset
584 *
585 * Explicitly sets NVM write offset. Normally when writing to NVM this
586 * is done automatically by usb4_switch_nvm_write().
587 *
588 * Returns %0 in success and negative errno if there was a failure.
589 */
590int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
b0407983
MW
591{
592 u32 metadata, dwaddress;
593 u8 status = 0;
594 int ret;
595
596 dwaddress = address / 4;
597 metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
598 USB4_NVM_SET_OFFSET_MASK;
599
fe265a06
MW
600 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
601 &status);
b0407983
MW
602 if (ret)
603 return ret;
604
605 return status ? -EIO : 0;
606}
607
9b383037
MW
/*
 * Block write callback for tb_nvm_write_data(): writes the next
 * @dwords dwords of @buf to the router NVM using the NVM_WRITE router
 * operation. The write offset was set up front with
 * usb4_switch_nvm_set_offset() so @dwaddress is not needed here.
 * Returns %0 on success, %-EIO on non-zero operation status.
 */
static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
					    const void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
				  buf, dwords, NULL, 0);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}
622
/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	/* Set the write offset once; subsequent writes are sequential */
	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_switch_nvm_write_next_block, sw);
}
645
/**
 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 * @sw: USB4 router
 *
 * After the new NVM has been written via usb4_switch_nvm_write(), this
 * function triggers NVM authentication process. The router gets power
 * cycled and if the authentication is successful the new NVM starts
 * running. In case of failure returns negative errno.
 *
 * The caller should call usb4_switch_nvm_authenticate_status() to read
 * the status of the authentication after power cycle. It should be the
 * first router operation to avoid the status being lost.
 */
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
	switch (ret) {
	/*
	 * The router is power cycled once NVM_AUTH is started so it is
	 * expected to get any of the following errors back.
	 */
	case -EACCES:
	case -ENOTCONN:
	case -ETIMEDOUT:
		return 0;

	default:
		return ret;
	}
}
678
/**
 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
 * @sw: USB4 router
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last NVM
 * authenticate router operation. If there is status then %0 is returned
 * and the status code is placed in @status. Returns negative errno in case
 * of failure.
 *
 * Must be called before any other router operation.
 */
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	u16 opcode;
	u32 val;
	int ret;

	/* Let the connection manager proxy handle this if it can */
	if (cm_ops->usb4_switch_nvm_authenticate_status) {
		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Check that the opcode is correct */
	opcode = val & ROUTER_CS_26_OPCODE_MASK;
	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
		/* OV still set means the operation is still in flight */
		if (val & ROUTER_CS_26_OV)
			return -EBUSY;
		if (val & ROUTER_CS_26_ONS)
			return -EOPNOTSUPP;

		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	} else {
		/* Last operation was not NVM_AUTH so no status to report */
		*status = 0;
	}

	return 0;
}
724
56ad3aef
MW
/**
 * usb4_switch_credits_init() - Read buffer allocation parameters
 * @sw: USB4 router
 *
 * Reads @sw buffer allocation parameters and initializes @sw buffer
 * allocation fields accordingly. Specifically @sw->credits_allocation
 * is set to %true if these parameters can be used in tunneling.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int usb4_switch_credits_init(struct tb_switch *sw)
{
	int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
	int ret, length, i, nports;
	const struct tb_port *port;
	u32 data[USB4_DATA_DWORDS];
	u32 metadata = 0;
	u8 status = 0;

	memset(data, 0, sizeof(data));
	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
				  &status, NULL, 0, data, ARRAY_SIZE(data));
	if (ret)
		return ret;
	if (status)
		return -EIO;

	/* Number of valid entries is reported in the metadata */
	length = metadata & USB4_BA_LENGTH_MASK;
	if (WARN_ON(length > ARRAY_SIZE(data)))
		return -EMSGSIZE;

	/* -1 means "parameter not reported by the router" */
	max_usb3 = -1;
	min_dp_aux = -1;
	min_dp_main = -1;
	max_pcie = -1;
	max_dma = -1;

	tb_sw_dbg(sw, "credit allocation parameters:\n");

	/* Each dword carries an index/value pair */
	for (i = 0; i < length; i++) {
		u16 index, value;

		index = data[i] & USB4_BA_INDEX_MASK;
		value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;

		switch (index) {
		case USB4_BA_MAX_USB3:
			tb_sw_dbg(sw, " USB3: %u\n", value);
			max_usb3 = value;
			break;
		case USB4_BA_MIN_DP_AUX:
			tb_sw_dbg(sw, " DP AUX: %u\n", value);
			min_dp_aux = value;
			break;
		case USB4_BA_MIN_DP_MAIN:
			tb_sw_dbg(sw, " DP main: %u\n", value);
			min_dp_main = value;
			break;
		case USB4_BA_MAX_PCIE:
			tb_sw_dbg(sw, " PCIe: %u\n", value);
			max_pcie = value;
			break;
		case USB4_BA_MAX_HI:
			tb_sw_dbg(sw, " DMA: %u\n", value);
			max_dma = value;
			break;
		default:
			tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
				  index);
			break;
		}
	}

	/*
	 * Validate the buffer allocation preferences. If we find
	 * issues, log a warning and fall back using the hard-coded
	 * values.
	 */

	/* Host router must report baMaxHI */
	if (!tb_route(sw) && max_dma < 0) {
		tb_sw_warn(sw, "host router is missing baMaxHI\n");
		goto err_invalid;
	}

	nports = 0;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_null(port))
			nports++;
	}

	/* Must have DP buffer allocation (multiple USB4 ports) */
	if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
		tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
		goto err_invalid;
	}

	/* Each adapter type present requires its matching parameter */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpout(port) && min_dp_main < 0) {
			tb_sw_warn(sw, "missing baMinDPmain");
			goto err_invalid;
		}
		if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
		    min_dp_aux < 0) {
			tb_sw_warn(sw, "missing baMinDPaux");
			goto err_invalid;
		}
		if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
		    max_usb3 < 0) {
			tb_sw_warn(sw, "missing baMaxUSB3");
			goto err_invalid;
		}
		if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
		    max_pcie < 0) {
			tb_sw_warn(sw, "missing baMaxPCIe");
			goto err_invalid;
		}
	}

	/*
	 * Buffer allocation passed the validation so we can use it in
	 * path creation.
	 */
	sw->credit_allocation = true;
	if (max_usb3 > 0)
		sw->max_usb3_credits = max_usb3;
	if (min_dp_aux > 0)
		sw->min_dp_aux_credits = min_dp_aux;
	if (min_dp_main > 0)
		sw->min_dp_main_credits = min_dp_main;
	if (max_pcie > 0)
		sw->max_pcie_credits = max_pcie;
	if (max_dma > 0)
		sw->max_dma_credits = max_dma;

	return 0;

err_invalid:
	return -EINVAL;
}
865
b0407983
MW
/**
 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * For DP tunneling this function can be used to query availability of
 * DP IN resource. Returns true if the resource is available for DP
 * tunneling, false otherwise.
 */
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	/* Adapter number is passed in the operation metadata */
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
			     &status);
	/*
	 * If DP resource allocation is not supported assume it is
	 * always available.
	 */
	if (ret == -EOPNOTSUPP)
		return true;
	if (ret)
		return false;

	/* Zero status means the resource is available */
	return !status;
}
894
/**
 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Allocates DP IN resource for DP tunneling using USB4 router
 * operations. If the resource was allocated returns %0. Otherwise
 * returns negative errno, in particular %-EBUSY if the resource is
 * already allocated.
 */
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	/* Adapter number is passed in the operation metadata */
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
			     &status);
	/* Routers without DP resource allocation treat this as success */
	if (ret == -EOPNOTSUPP)
		return 0;
	if (ret)
		return ret;

	return status ? -EBUSY : 0;
}
920
/**
 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Releases the previously allocated DP IN resource.
 */
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	/* Adapter number is passed in the operation metadata */
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
			     &status);
	/* Routers without DP resource allocation treat this as success */
	if (ret == -EOPNOTSUPP)
		return 0;
	if (ret)
		return ret;

	return status ? -EIO : 0;
}
943
944static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
945{
946 struct tb_port *p;
947 int usb4_idx = 0;
948
949 /* Assume port is primary */
950 tb_switch_for_each_port(sw, p) {
951 if (!tb_port_is_null(p))
952 continue;
953 if (tb_is_upstream_port(p))
954 continue;
955 if (!p->link_nr) {
956 if (p == port)
957 break;
958 usb4_idx++;
959 }
960 }
961
962 return usb4_idx;
963}
964
965/**
966 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
967 * @sw: USB4 router
968 * @port: USB4 port
969 *
970 * USB4 routers have direct mapping between USB4 ports and PCIe
971 * downstream adapters where the PCIe topology is extended. This
972 * function returns the corresponding downstream PCIe adapter or %NULL
973 * if no such mapping was possible.
974 */
975struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
976 const struct tb_port *port)
977{
978 int usb4_idx = usb4_port_idx(sw, port);
979 struct tb_port *p;
980 int pcie_idx = 0;
981
982 /* Find PCIe down port matching usb4_port */
983 tb_switch_for_each_port(sw, p) {
984 if (!tb_port_is_pcie_down(p))
985 continue;
986
9cac51a0 987 if (pcie_idx == usb4_idx)
b0407983
MW
988 return p;
989
990 pcie_idx++;
991 }
992
993 return NULL;
994}
995
e6f81858
RM
996/**
997 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
998 * @sw: USB4 router
999 * @port: USB4 port
1000 *
1001 * USB4 routers have direct mapping between USB4 ports and USB 3.x
1002 * downstream adapters where the USB 3.x topology is extended. This
1003 * function returns the corresponding downstream USB 3.x adapter or
1004 * %NULL if no such mapping was possible.
1005 */
1006struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
1007 const struct tb_port *port)
1008{
1009 int usb4_idx = usb4_port_idx(sw, port);
1010 struct tb_port *p;
1011 int usb_idx = 0;
1012
1013 /* Find USB3 down port matching usb4_port */
1014 tb_switch_for_each_port(sw, p) {
1015 if (!tb_port_is_usb3_down(p))
1016 continue;
1017
77cfa40f 1018 if (usb_idx == usb4_idx)
e6f81858
RM
1019 return p;
1020
1021 usb_idx++;
1022 }
1023
1024 return NULL;
1025}
1026
cae5f515
MW
/**
 * usb4_switch_add_ports() - Add USB4 ports for this router
 * @sw: USB4 router
 *
 * For USB4 router finds all USB4 ports and registers devices for each.
 * Can be called to any router.
 *
 * Return %0 in case of success and negative errno in case of failure.
 */
int usb4_switch_add_ports(struct tb_switch *sw)
{
	struct tb_port *port;

	/* Only native USB4 routers (not ICM-managed) get port devices */
	if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		struct usb4_port *usb4;

		if (!tb_port_is_null(port))
			continue;
		if (!port->cap_usb4)
			continue;

		usb4 = usb4_port_device_add(port);
		if (IS_ERR(usb4)) {
			/* Roll back the ports added so far */
			usb4_switch_remove_ports(sw);
			return PTR_ERR(usb4);
		}

		port->usb4 = usb4;
	}

	return 0;
}
1062
1063/**
1064 * usb4_switch_remove_ports() - Removes USB4 ports from this router
1065 * @sw: USB4 router
1066 *
1067 * Unregisters previously registered USB4 ports.
1068 */
1069void usb4_switch_remove_ports(struct tb_switch *sw)
1070{
1071 struct tb_port *port;
1072
1073 tb_switch_for_each_port(sw, port) {
1074 if (port->usb4) {
1075 usb4_port_device_remove(port->usb4);
1076 port->usb4 = NULL;
1077 }
1078 }
1079}
1080
b0407983
MW
/**
 * usb4_port_unlock() - Unlock USB4 downstream port
 * @port: USB4 port to unlock
 *
 * Unlocks USB4 downstream port so that the connection manager can
 * access the router below this port.
 */
int usb4_port_unlock(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	/* Clearing the lock bit opens up the downstream router */
	val &= ~ADP_CS_4_LCK;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}
3b1d8d57 1100
5d2569cb
ML
/**
 * usb4_port_hotplug_enable() - Enables hotplug for a port
 * @port: USB4 port to operate on
 *
 * Enables hot plug events on a given port. This is only intended
 * to be used on lane, DP-IN, and DP-OUT adapters.
 */
int usb4_port_hotplug_enable(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	/* DHP is "disable hot plug"; clearing it enables the events */
	val &= ~ADP_CS_5_DHP;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
1120
01da6b99
S
/**
 * usb4_port_reset() - Issue downstream port reset
 * @port: USB4 port to reset
 *
 * Issues downstream port reset to @port.
 *
 * Return: %0 in case of success and negative errno in case of failure.
 */
int usb4_port_reset(struct tb_port *port)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Assert the Downstream Port Reset bit */
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	val |= PORT_CS_19_DPR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* Keep the reset asserted for 10 ms before deasserting it */
	fsleep(10000);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	val &= ~PORT_CS_19_DPR;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
1159
e28178bf
MW
/* Set or clear the Port Configured bit (PORT_CS_19_PC) of @port */
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PC;
	else
		val &= ~PORT_CS_19_PC;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
1181
/**
 * usb4_port_configure() - Set USB4 port configured
 * @port: USB4 port
 *
 * Sets the USB4 link to be configured for power management purposes.
 *
 * Return: %0 in case of success and negative errno in case of failure.
 */
int usb4_port_configure(struct tb_port *port)
{
	return usb4_port_set_configured(port, true);
}
1192
/**
 * usb4_port_unconfigure() - Set USB4 port unconfigured
 * @port: USB4 port
 *
 * Sets the USB4 link to be unconfigured for power management purposes.
 * Any error from the register write is intentionally ignored.
 */
void usb4_port_unconfigure(struct tb_port *port)
{
	usb4_port_set_configured(port, false);
}
1203
284652a4
MW
/* Set or clear the Path Inter-Domain bit (PORT_CS_19_PID) of @port */
static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PID;
	else
		val &= ~PORT_CS_19_PID;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
1225
/**
 * usb4_port_configure_xdomain() - Configure port for XDomain
 * @port: USB4 port connected to another host
 * @xd: XDomain that is connected to the port
 *
 * Marks the USB4 port as being connected to another host and updates
 * the link type. Returns %0 in success and negative errno in failure.
 */
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	/* Record whether the inter-domain link runs in USB4 mode */
	xd->link_usb4 = link_is_usb4(port);
	return usb4_set_xdomain_configured(port, true);
}
1239
/**
 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
 * @port: USB4 port that was connected to another host
 *
 * Clears USB4 port from being marked as XDomain. Any error from the
 * register write is intentionally ignored.
 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	usb4_set_xdomain_configured(port, false);
}
1250
/*
 * Poll bits @bit of the port config space register at @offset until
 * they read back as @value or @timeout_msec elapses. Sleeps
 * @delay_usec between reads. Returns %0 on match, %-ETIMEDOUT on
 * timeout and negative errno if the register read fails.
 */
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec, unsigned long delay_usec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		fsleep(delay_usec);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
1272
02d12855
RM
/* Read up to %USB4_DATA_DWORDS dwords from the port sideband data registers */
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			    dwords);
}
1281
/* Write up to %USB4_DATA_DWORDS dwords to the port sideband data registers */
static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			     dwords);
}
1291
/*
 * Read @size bytes of sideband register @reg from @target (retimers
 * are addressed with @index). Returns %-ENODEV if the target did not
 * respond and %-EIO if it reported an error.
 */
static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Build the sideband access control word and mark it pending */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Wait for the port to clear the pending bit */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* No response from the target */
	if (val & PORT_CS_1_NR)
		return -ENODEV;
	/* Target reported an error result code */
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}
1331
/*
 * Write @size bytes from @buf to sideband register @reg of @target
 * (retimers are addressed with @index). Returns %-ENODEV if the target
 * did not respond and %-EIO if it reported an error.
 */
static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
			      u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Stage the payload in the data registers before the control word */
	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	/* Build the sideband access control word and mark it pending */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Wait for the port to clear the pending bit */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* No response from the target */
	if (val & PORT_CS_1_NR)
		return -ENODEV;
	/* Target reported an error result code */
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}
1378
1f15af76
AS
1379static int usb4_port_sb_opcode_err_to_errno(u32 val)
1380{
1381 switch (val) {
1382 case 0:
1383 return 0;
1384 case USB4_SB_OPCODE_ERR:
1385 return -EAGAIN;
1386 case USB4_SB_OPCODE_ONS:
1387 return -EOPNOTSUPP;
1388 default:
1389 return -EIO;
1390 }
1391}
1392
02d12855
RM
/*
 * Run sideband operation @opcode on @target and poll until it
 * completes or @timeout_msec elapses. While the operation is in
 * progress the opcode register reads back the opcode itself; any
 * other value is the completion result.
 */
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	/* Writing the opcode starts the operation */
	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		if (val != opcode)
			return usb4_port_sb_opcode_err_to_errno(val);

		fsleep(USB4_PORT_SB_DELAY);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
1423
3406de7c
RM
/*
 * Move the router side of @port into (or out of) offline mode.
 * Metadata 0 means offline, 1 means online. The opcode is written
 * raw (no completion polling) because in offline mode the normal
 * completion path is not available.
 */
static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
{
	u32 val = !offline;
	int ret;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	val = USB4_SB_OPCODE_ROUTER_OFFLINE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
1438
/**
 * usb4_port_router_offline() - Put the USB4 port to offline mode
 * @port: USB4 port
 *
 * This function puts the USB4 port into offline mode. In this mode the
 * port does not react on hotplug events anymore. This needs to be
 * called before retimer access is done when the USB4 links is not up.
 *
 * Returns %0 in case of success and negative errno if there was an
 * error.
 */
int usb4_port_router_offline(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, true);
}
1454
/**
 * usb4_port_router_online() - Put the USB4 port back to online
 * @port: USB4 port
 *
 * Makes the USB4 port functional again.
 *
 * Return: %0 in case of success and negative errno in case of failure.
 */
int usb4_port_router_online(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, false);
}
1465
02d12855
RM
/**
 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
 * @port: USB4 port
 *
 * This forces the USB4 port to send broadcast RT transaction which
 * makes the retimers on the link to assign index to themselves. Returns
 * %0 in case of success and negative errno if there was an error.
 */
int usb4_port_enumerate_retimers(struct tb_port *port)
{
	u32 val;

	/* Raw opcode write: the broadcast has no completion to poll */
	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
1482
8a90e4fa
GF
1483/**
1484 * usb4_port_clx_supported() - Check if CLx is supported by the link
1485 * @port: Port to check for CLx support for
1486 *
1487 * PORT_CS_18_CPS bit reflects if the link supports CLx including
1488 * active cables (if connected on the link).
1489 */
1490bool usb4_port_clx_supported(struct tb_port *port)
1491{
1492 int ret;
1493 u32 val;
1494
1495 ret = tb_port_read(port, &val, TB_CFG_PORT,
1496 port->cap_usb4 + PORT_CS_18, 1);
1497 if (ret)
1498 return false;
1499
1500 return !!(val & PORT_CS_18_CPS);
1501}
1502
81af2952
GF
/**
 * usb4_port_asym_supported() - If the port supports asymmetric link
 * @port: USB4 port
 *
 * Checks if the port and the cable supports asymmetric link and returns
 * %true in that case.
 */
bool usb4_port_asym_supported(struct tb_port *port)
{
	u32 val;

	if (!port->cap_usb4)
		return false;

	/* Read errors are treated as "not supported" */
	if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1))
		return false;

	return !!(val & PORT_CS_18_CSA);
}
1522
/**
 * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric
 * @port: USB4 port
 * @width: Asymmetric width to configure
 *
 * Sets USB4 port link width to @width. Can be called for widths where
 * usb4_port_asym_width_supported() returned @true.
 *
 * Return: %0 in case of success and negative errno in case of failure
 * (%-EINVAL for an unsupported @width or missing PHY capability).
 */
int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Replace the target width field with the requested width */
	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK;
	switch (width) {
	case TB_LINK_WIDTH_DUAL:
		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL);
		break;
	case TB_LINK_WIDTH_ASYM_TX:
		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX);
		break;
	case TB_LINK_WIDTH_ASYM_RX:
		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX);
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}
1565
/**
 * usb4_port_asym_start() - Start symmetry change and wait for completion
 * @port: USB4 port
 *
 * Start symmetry change of the link to asymmetric or symmetric
 * (according to what was previously set in tb_port_set_link_width().
 * Wait for completion of the change.
 *
 * Returns %0 in case of success, %-ETIMEDOUT if case of timeout or
 * a negative errno in case of a failure.
 */
int usb4_port_asym_start(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	val &= ~PORT_CS_19_START_ASYM;
	val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/*
	 * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4
	 * port started the symmetry transition.
	 */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
				     PORT_CS_19_START_ASYM, 0, 1000,
				     USB4_PORT_DELAY);
	if (ret)
		return ret;

	/* Then wait for the transition to be completed */
	return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
				      PORT_CS_18_TIP, 0, 5000, USB4_PORT_DELAY);
}
1609
d0f1e0c2
MW
/**
 * usb4_port_margining_caps() - Read USB4 port margining capabilities
 * @port: USB4 port
 * @caps: Array with at least two elements to hold the results
 *
 * Reads the USB4 port lane margining capabilities into @caps.
 *
 * Return: %0 in case of success and negative errno in case of failure.
 */
int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
	if (ret)
		return ret;

	/* The operation leaves two dwords of capabilities in the data registers */
	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_DATA, caps, sizeof(*caps) * 2);
}
1629
/**
 * usb4_port_hw_margin() - Run hardware lane margining on port
 * @port: USB4 port
 * @lanes: Which lanes to run (must match the port capabilities). Can be
 *	   %0, %1 or %7.
 * @ber_level: BER level contour value
 * @timing: Perform timing margining instead of voltage
 * @right_high: Use Right/high margin instead of left/low
 * @results: Array with at least two elements to hold the results
 *
 * Runs hardware lane margining on USB4 port and returns the result in
 * @results.
 *
 * Return: %0 in case of success and negative errno in case of failure.
 */
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
			unsigned int ber_level, bool timing, bool right_high,
			u32 *results)
{
	u32 val;
	int ret;

	/* Encode the margining parameters into the metadata register */
	val = lanes;
	if (timing)
		val |= USB4_MARGIN_HW_TIME;
	if (right_high)
		val |= USB4_MARGIN_HW_RH;
	if (ber_level)
		val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
			USB4_MARGIN_HW_BER_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_DATA, results, sizeof(*results) * 2);
}
1672
/**
 * usb4_port_sw_margin() - Run software lane margining on port
 * @port: USB4 port
 * @lanes: Which lanes to run (must match the port capabilities). Can be
 *	   %0, %1 or %7.
 * @timing: Perform timing margining instead of voltage
 * @right_high: Use Right/high margin instead of left/low
 * @counter: What to do with the error counter
 *
 * Runs software lane margining on USB4 port. Read back the error
 * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
 * success and negative errno otherwise.
 */
int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
			bool right_high, u32 counter)
{
	u32 val;
	int ret;

	/* Encode the margining parameters into the metadata register */
	val = lanes;
	if (timing)
		val |= USB4_MARGIN_SW_TIME;
	if (right_high)
		val |= USB4_MARGIN_SW_RH;
	val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
		USB4_MARGIN_SW_COUNTER_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			       USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
}
1708
/**
 * usb4_port_sw_margin_errors() - Read the software margining error counters
 * @port: USB4 port
 * @errors: Error metadata is copied here.
 *
 * This reads back the software margining error counters from the port.
 * Returns %0 in success and negative errno otherwise.
 */
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
	if (ret)
		return ret;

	/* The counters are returned through the metadata register */
	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, errors, sizeof(*errors));
}
1729
02d12855
RM
/* Run sideband operation @opcode on the retimer at @index of @port */
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}
1737
3406de7c
RM
/**
 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Enables sideband channel transactions on SBTX. Can be used when USB4
 * link does not go up, for example if there is no device connected.
 *
 * Return: %0 in case of success and negative errno in case of failure.
 */
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				   500);

	if (ret != -ENODEV)
		return ret;

	/*
	 * Per the USB4 retimer spec, the retimer is not required to
	 * send an RT (Retimer Transaction) response for the first
	 * SET_INBOUND_SBTX command
	 */
	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				    500);
}
1764
cd0c1e58
GF
/**
 * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Disables sideband channel transactions on SBTX. The reverse of
 * usb4_port_retimer_set_inbound_sbtx().
 *
 * Return: %0 in case of success and negative errno in case of failure.
 */
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
{
	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
}
1778
02d12855
RM
/**
 * usb4_port_retimer_read() - Read from retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to read
 * @buf: Data from @reg is stored here
 * @size: Number of bytes to read
 *
 * Function reads retimer sideband registers starting from @reg. The
 * retimer is connected to @port at @index. Returns %0 in case of
 * success, and read data is copied to @buf. If there is no retimer
 * present at given @index returns %-ENODEV. In any other failure
 * returns negative errno.
 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}
1799
/**
 * usb4_port_retimer_write() - Write to retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to write
 * @buf: Data that is written starting from @reg
 * @size: Number of bytes to write
 *
 * Writes retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. Returns %0 in case of success. If there
 * is no retimer present at given @index returns %-ENODEV. In any other
 * failure returns negative errno.
 */
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size)
{
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				  size);
}
1819
/**
 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is last one (connected directly to the
 * Type-C port) this function returns %1. If it is not returns %0. If
 * the retimer is not present returns %-ENODEV. Otherwise returns
 * negative errno.
 */
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
				   500);
	if (ret)
		return ret;

	/* The answer is returned in bit 0 of the metadata register */
	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & 1;
}
1844
/**
 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
 * @port: USB4 port
 * @index: Retimer index
 *
 * Reads NVM sector size (in bytes) of a retimer at @index. This
 * operation can be used to determine whether the retimer supports NVM
 * upgrade for example. Returns sector size in bytes or negative errno
 * in case of error. Specifically returns %-ENODEV if there is no
 * retimer at @index.
 */
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
				   500);
	if (ret)
		return ret;

	/* Sector size is returned through the metadata register */
	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
1870
faa1c615
RM
/**
 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
 * @port: USB4 port
 * @index: Retimer index
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this is
 * done automatically by usb4_port_retimer_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
				     unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	/* The offset is communicated as a dword address in the metadata */
	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		  USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}
1900
/* Context passed to the retimer NVM block read/write callbacks */
struct retimer_info {
	struct tb_port *port;	/* USB4 port the retimer is reached through */
	u8 index;		/* Retimer index on that port */
};
1905
9b383037
MW
/*
 * tb_nvm_write_data() callback: stage @dwords dwords of @buf into the
 * sideband data registers and trigger a block write at the retimer's
 * current NVM offset. @dwaddress is unused because the retimer
 * advances the offset itself after each block.
 */
static int usb4_port_retimer_nvm_write_next_block(void *data,
	unsigned int dwaddress, const void *buf, size_t dwords)

{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
				      buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}
1923
/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	/* Set the start offset once; the retimer then auto-increments */
	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_port_retimer_nvm_write_next_block, &info);
}
1950
/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 *
 * Return: %0 in case of success and negative errno in case of failure.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
1975
/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
				     sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_opcode_err_to_errno(val);
	switch (ret) {
	case 0:
		/* Authentication succeeded */
		*status = 0;
		return 0;

	case -EAGAIN:
		/* Generic error: the actual result code is in the metadata */
		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
					     &metadata, sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	default:
		return ret;
	}
}
2019
/*
 * tb_nvm_read_data() callback: read @dwords dwords starting at dword
 * address @dwaddress from the retimer NVM into @buf.
 */
static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	/* Only encode the length when reading less than the maximum */
	if (dwords < USB4_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
				      dwords * 4);
}
2045
/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };

	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_port_retimer_nvm_read_block, &info);
}
2066
f0a57dd3
GF
2067static inline unsigned int
2068usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
2069{
2070 /* Take the possible bandwidth limitation into account */
2071 if (port->max_bw)
2072 return min(bw, port->max_bw);
2073 return bw;
2074}
2075
3b1d8d57
MW
/**
 * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
 * @port: USB3 adapter port
 *
 * Return maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	/* Maximum supported link rate field: 20G or 10G in Mb/s */
	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;

	return usb4_usb3_port_max_bandwidth(port, ret);
}
2101
3b1d8d57
MW
/*
 * Set or clear the Connection Manager Request bit on a host router
 * USB3 downstream adapter and wait for the adapter to acknowledge it
 * through the HCA bit.
 */
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
	int ret;
	u32 val;

	if (!tb_port_is_usb3_down(port))
		return -EINVAL;
	/* Only valid on the host router (empty route) */
	if (tb_route(port->sw))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	if (request)
		val |= ADP_USB3_CS_2_CMR;
	else
		val &= ~ADP_USB3_CS_2_CMR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * We can use val here directly as the CMR bit is in the same place
	 * as HCA. Just mask out others.
	 */
	val &= ADP_USB3_CS_2_CMR;
	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
				      ADP_USB3_CS_1_HCA, val, 1500,
				      USB4_PORT_DELAY);
}
2136
/* Ask the USB3 adapter to pause traffic so bandwidth registers can be accessed */
static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}
2141
/* Release the connection manager request set by usb4_usb3_port_set_cm_request() */
static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}
2146
2147static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
2148{
2149 unsigned long uframes;
2150
4c767ce4 2151 uframes = bw * 512UL << scale;
5d883668 2152 return DIV_ROUND_CLOSEST(uframes * 8000, MEGA);
3b1d8d57
MW
2153}
2154
2155static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
2156{
2157 unsigned long uframes;
2158
2159 /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
5d883668 2160 uframes = ((unsigned long)mbps * MEGA) / 8000;
4c767ce4 2161 return DIV_ROUND_UP(uframes, 512UL << scale);
3b1d8d57
MW
2162}
2163
/*
 * Read the currently allocated upstream/downstream USB3 bandwidth (in
 * Mb/s) from the adapter registers. Caller must hold the CM request
 * (see usb4_usb3_port_set_cm_request()).
 */
static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
						   int *upstream_bw,
						   int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_2_AUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}
2191
/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	/* Hold the CM request over the register read */
	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}
2217
/*
 * Read the currently consumed upstream and downstream USB3 bandwidth
 * from the adapter registers and return both in Mb/s.
 */
static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
						  int *upstream_bw,
						  int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_1, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	/* Both bandwidth fields share the same scale factor */
	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_1_CUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}
2245
/*
 * Program the allocated upstream/downstream USB3 bandwidth (in Mb/s)
 * into the adapter registers. A common scale factor is picked so that
 * the larger of the two converted values stays below 4096 (the limit
 * enforced by the check below).
 */
static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
						    int upstream_bw,
						    int downstream_bw)
{
	u32 val, ubw, dbw, scale;
	int ret, max_bw;

	/* Figure out suitable scale */
	scale = 0;
	max_bw = max(upstream_bw, downstream_bw);
	while (scale < 64) {
		if (mbps_to_usb3_bw(max_bw, scale) < 4096)
			break;
		scale++;
	}

	if (WARN_ON(scale >= 64))
		return -EINVAL;

	/* Write the scale first so both bandwidth fields use it */
	ret = tb_port_write(port, &scale, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	ubw = mbps_to_usb3_bw(upstream_bw, scale);
	dbw = mbps_to_usb3_bw(downstream_bw, scale);

	tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/* Replace both allocated bandwidth fields, keep the other bits */
	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
	val |= ubw;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_2, 1);
}
2287
/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int ret, consumed_up, consumed_down, allocate_up, allocate_down;

	/* Hold the CM request over the read/write sequence */
	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Don't allow it go lower than what is consumed */
	allocate_up = max(*upstream_bw, consumed_up);
	allocate_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
						       allocate_down);
	if (ret)
		goto err_request;

	*upstream_bw = allocate_up;
	*downstream_bw = allocate_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
2335
/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	/* Hold the CM request over the read/write sequence */
	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 900 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	if (consumed_up < 900)
		consumed_up = 900;
	if (consumed_down < 900)
		consumed_down = 900;

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
e3273801
MW
2382
2383static bool is_usb4_dpin(const struct tb_port *port)
2384{
2385 if (!tb_port_is_dpin(port))
2386 return false;
2387 if (!tb_switch_is_usb4(port->sw))
2388 return false;
2389 return true;
2390}
2391
2392/**
2393 * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
2394 * @port: DP IN adapter
2395 * @cm_id: CM ID to assign
2396 *
2397 * Sets CM ID for the @port. Returns %0 on success and negative errno
2398 * otherwise. Speficially returns %-EOPNOTSUPP if the @port does not
2399 * support this.
2400 */
2401int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
2402{
2403 u32 val;
2404 int ret;
2405
2406 if (!is_usb4_dpin(port))
2407 return -EOPNOTSUPP;
2408
2409 ret = tb_port_read(port, &val, TB_CFG_PORT,
2410 port->cap_adap + ADP_DP_CS_2, 1);
2411 if (ret)
2412 return ret;
2413
2414 val &= ~ADP_DP_CS_2_CM_ID_MASK;
2415 val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;
2416
2417 return tb_port_write(port, &val, TB_CFG_PORT,
2418 port->cap_adap + ADP_DP_CS_2, 1);
2419}
2420
2421/**
8d73f6b8
MW
2422 * usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode
2423 * supported
e3273801
MW
2424 * @port: DP IN adapter to check
2425 *
2426 * Can be called to any DP IN adapter. Returns true if the adapter
2427 * supports USB4 bandwidth allocation mode, false otherwise.
2428 */
8d73f6b8 2429bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
e3273801
MW
2430{
2431 int ret;
2432 u32 val;
2433
2434 if (!is_usb4_dpin(port))
2435 return false;
2436
2437 ret = tb_port_read(port, &val, TB_CFG_PORT,
2438 port->cap_adap + DP_LOCAL_CAP, 1);
2439 if (ret)
2440 return false;
2441
2442 return !!(val & DP_COMMON_CAP_BW_MODE);
2443}
2444
2445/**
8d73f6b8
MW
2446 * usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode
2447 * enabled
e3273801
MW
2448 * @port: DP IN adapter to check
2449 *
2450 * Can be called to any DP IN adapter. Returns true if the bandwidth
2451 * allocation mode has been enabled, false otherwise.
2452 */
8d73f6b8 2453bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
e3273801
MW
2454{
2455 int ret;
2456 u32 val;
2457
2458 if (!is_usb4_dpin(port))
2459 return false;
2460
2461 ret = tb_port_read(port, &val, TB_CFG_PORT,
2462 port->cap_adap + ADP_DP_CS_8, 1);
2463 if (ret)
2464 return false;
2465
2466 return !!(val & ADP_DP_CS_8_DPME);
2467}
2468
2469/**
8d73f6b8
MW
2470 * usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for
2471 * bandwidth allocation mode
e3273801
MW
2472 * @port: DP IN adapter
2473 * @supported: Does the CM support bandwidth allocation mode
2474 *
2475 * Can be called to any DP IN adapter. Sets or clears the CM support bit
2476 * of the DP IN adapter. Returns %0 in success and negative errno
2477 * otherwise. Specifically returns %-OPNOTSUPP if the passed in adapter
2478 * does not support this.
2479 */
8d73f6b8
MW
2480int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
2481 bool supported)
e3273801
MW
2482{
2483 u32 val;
2484 int ret;
2485
2486 if (!is_usb4_dpin(port))
2487 return -EOPNOTSUPP;
2488
2489 ret = tb_port_read(port, &val, TB_CFG_PORT,
2490 port->cap_adap + ADP_DP_CS_2, 1);
2491 if (ret)
2492 return ret;
2493
2494 if (supported)
2495 val |= ADP_DP_CS_2_CMMS;
2496 else
2497 val &= ~ADP_DP_CS_2_CMMS;
2498
2499 return tb_port_write(port, &val, TB_CFG_PORT,
2500 port->cap_adap + ADP_DP_CS_2, 1);
2501}
2502
2503/**
2504 * usb4_dp_port_group_id() - Return Group ID assigned for the adapter
2505 * @port: DP IN adapter
2506 *
2507 * Reads bandwidth allocation Group ID from the DP IN adapter and
2508 * returns it. If the adapter does not support setting Group_ID
2509 * %-EOPNOTSUPP is returned.
2510 */
2511int usb4_dp_port_group_id(struct tb_port *port)
2512{
2513 u32 val;
2514 int ret;
2515
2516 if (!is_usb4_dpin(port))
2517 return -EOPNOTSUPP;
2518
2519 ret = tb_port_read(port, &val, TB_CFG_PORT,
2520 port->cap_adap + ADP_DP_CS_2, 1);
2521 if (ret)
2522 return ret;
2523
2524 return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
2525}
2526
2527/**
2528 * usb4_dp_port_set_group_id() - Set adapter Group ID
2529 * @port: DP IN adapter
2530 * @group_id: Group ID for the adapter
2531 *
2532 * Sets bandwidth allocation mode Group ID for the DP IN adapter.
2533 * Returns %0 in case of success and negative errno otherwise.
2534 * Specifically returns %-EOPNOTSUPP if the adapter does not support
2535 * this.
2536 */
2537int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
2538{
2539 u32 val;
2540 int ret;
2541
2542 if (!is_usb4_dpin(port))
2543 return -EOPNOTSUPP;
2544
2545 ret = tb_port_read(port, &val, TB_CFG_PORT,
2546 port->cap_adap + ADP_DP_CS_2, 1);
2547 if (ret)
2548 return ret;
2549
2550 val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
2551 val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;
2552
2553 return tb_port_write(port, &val, TB_CFG_PORT,
2554 port->cap_adap + ADP_DP_CS_2, 1);
2555}
2556
/**
 * usb4_dp_port_nrd() - Read non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s is placed here
 * @lanes: Non-reduced lanes are placed here
 *
 * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
 * %0 in success and negative errno otherwise. Specifically returns
 * %-EOPNOTSUPP if the adapter does not support this.
 *
 * NOTE(review): encodings outside the listed cases leave @rate/@lanes
 * unmodified — callers should initialize them if that matters; confirm
 * the register fields cannot hold other values.
 */
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
{
	u32 val, tmp;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* Decode the non-reduced maximum link rate field */
	tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
	switch (tmp) {
	case DP_COMMON_CAP_RATE_RBR:
		*rate = 1620;
		break;
	case DP_COMMON_CAP_RATE_HBR:
		*rate = 2700;
		break;
	case DP_COMMON_CAP_RATE_HBR2:
		*rate = 5400;
		break;
	case DP_COMMON_CAP_RATE_HBR3:
		*rate = 8100;
		break;
	}

	/* Decode the non-reduced maximum lane count field */
	tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
	switch (tmp) {
	case DP_COMMON_CAP_1_LANE:
		*lanes = 1;
		break;
	case DP_COMMON_CAP_2_LANES:
		*lanes = 2;
		break;
	case DP_COMMON_CAP_4_LANES:
		*lanes = 4;
		break;
	}

	return 0;
}
2611
/**
 * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s
 * @lanes: Non-reduced lanes
 *
 * Before the capabilities reduction this function can be used to set
 * the non-reduced values for the DP IN adapter. Returns %0 in success
 * and negative errno otherwise. If the adapter does not support this
 * %-EOPNOTSUPP is returned. Rates/lanes outside the supported set
 * return %-EINVAL.
 */
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_NRD_MLR_MASK;

	switch (rate) {
	case 1620:
		/* RBR leaves the cleared field as-is */
		break;
	case 2700:
		val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	case 5400:
		val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	case 8100:
		val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	default:
		return -EINVAL;
	}

	val &= ~ADP_DP_CS_2_NRD_MLC_MASK;

	switch (lanes) {
	case 1:
		/* Single lane leaves the cleared field as-is */
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2675
/**
 * usb4_dp_port_granularity() - Return granularity for the bandwidth values
 * @port: DP IN adapter
 *
 * Reads the programmed granularity from @port. If the DP IN adapter does
 * not support bandwidth allocation mode returns %-EOPNOTSUPP and negative
 * errno in other error cases. An unrecognized granularity encoding
 * returns %-EINVAL.
 */
int usb4_dp_port_granularity(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ADP_DP_CS_2_GR_MASK;
	val >>= ADP_DP_CS_2_GR_SHIFT;

	/* Translate the register encoding to Mb/s */
	switch (val) {
	case ADP_DP_CS_2_GR_0_25G:
		return 250;
	case ADP_DP_CS_2_GR_0_5G:
		return 500;
	case ADP_DP_CS_2_GR_1G:
		return 1000;
	}

	return -EINVAL;
}
2711
/**
 * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
 * @port: DP IN adapter
 * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
 *
 * Sets the granularity used with the estimated, allocated and requested
 * bandwidth. Returns %0 in success and negative errno otherwise. If the
 * adapter does not support this %-EOPNOTSUPP is returned. Unsupported
 * @granularity values return %-EINVAL.
 */
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_GR_MASK;

	/* Translate Mb/s to the register encoding */
	switch (granularity) {
	case 250:
		val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
		break;
	case 500:
		val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
		break;
	case 1000:
		val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2753
2754/**
8d73f6b8 2755 * usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth
e3273801
MW
2756 * @port: DP IN adapter
2757 * @bw: Estimated bandwidth in Mb/s.
2758 *
2759 * Sets the estimated bandwidth to @bw. Set the granularity by calling
2760 * usb4_dp_port_set_granularity() before calling this. The @bw is round
2761 * down to the closest granularity multiplier. Returns %0 in success
2762 * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
2763 * the adapter does not support this.
2764 */
8d73f6b8 2765int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
e3273801
MW
2766{
2767 u32 val, granularity;
2768 int ret;
2769
2770 if (!is_usb4_dpin(port))
2771 return -EOPNOTSUPP;
2772
2773 ret = usb4_dp_port_granularity(port);
2774 if (ret < 0)
2775 return ret;
2776 granularity = ret;
2777
2778 ret = tb_port_read(port, &val, TB_CFG_PORT,
2779 port->cap_adap + ADP_DP_CS_2, 1);
2780 if (ret)
2781 return ret;
2782
2783 val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
2784 val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;
2785
2786 return tb_port_write(port, &val, TB_CFG_PORT,
2787 port->cap_adap + ADP_DP_CS_2, 1);
2788}
2789
2790/**
8d73f6b8 2791 * usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
e3273801
MW
2792 * @port: DP IN adapter
2793 *
2794 * Reads and returns allocated bandwidth for @port in Mb/s (taking into
2795 * account the programmed granularity). Returns negative errno in case
2796 * of error.
2797 */
8d73f6b8 2798int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
e3273801
MW
2799{
2800 u32 val, granularity;
2801 int ret;
2802
2803 if (!is_usb4_dpin(port))
2804 return -EOPNOTSUPP;
2805
2806 ret = usb4_dp_port_granularity(port);
2807 if (ret < 0)
2808 return ret;
2809 granularity = ret;
2810
2811 ret = tb_port_read(port, &val, TB_CFG_PORT,
2812 port->cap_adap + DP_STATUS, 1);
2813 if (ret)
2814 return ret;
2815
2816 val &= DP_STATUS_ALLOCATED_BW_MASK;
2817 val >>= DP_STATUS_ALLOCATED_BW_SHIFT;
2818
2819 return val * granularity;
2820}
2821
2822static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
2823{
2824 u32 val;
2825 int ret;
2826
2827 ret = tb_port_read(port, &val, TB_CFG_PORT,
2828 port->cap_adap + ADP_DP_CS_2, 1);
2829 if (ret)
2830 return ret;
2831
2832 if (ack)
2833 val |= ADP_DP_CS_2_CA;
2834 else
2835 val &= ~ADP_DP_CS_2_CA;
2836
2837 return tb_port_write(port, &val, TB_CFG_PORT,
2838 port->cap_adap + ADP_DP_CS_2, 1);
2839}
2840
/* Assert the CM Ack bit for @port */
static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
{
	return __usb4_dp_port_set_cm_ack(port, true);
}
2845
/*
 * Deassert the CM Ack bit, then poll (up to @timeout_msec) until the
 * DPTX request (DR) bit in ADP_DP_CS_8 clears, and finally make sure CA
 * is left deasserted in ADP_DP_CS_2. Returns -ETIMEDOUT if DR does not
 * clear in time.
 */
static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
					      int timeout_msec)
{
	ktime_t end;
	u32 val;
	int ret;

	ret = __usb4_dp_port_set_cm_ack(port, false);
	if (ret)
		return ret;

	end = ktime_add_ms(ktime_get(), timeout_msec);
	do {
		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_adap + ADP_DP_CS_8, 1);
		if (ret)
			return ret;

		if (!(val & ADP_DP_CS_8_DR))
			break;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), end));

	/* val still holds the last ADP_DP_CS_8 read */
	if (val & ADP_DP_CS_8_DR) {
		tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
		return -ETIMEDOUT;
	}

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_CA;
	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2884
/**
 * usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth
 * @port: DP IN adapter
 * @bw: New allocated bandwidth in Mb/s
 *
 * Communicates the new allocated bandwidth with the DPCD (graphics
 * driver). Takes into account the programmed granularity. Returns %0 in
 * success and negative errno in case of error.
 */
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	/* The field holds the bandwidth in granularity units */
	val &= ~DP_STATUS_ALLOCATED_BW_MASK;
	val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	/* Ack the change and wait until the DPTX request clears */
	ret = usb4_dp_port_set_cm_ack(port);
	if (ret)
		return ret;

	return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
}
2926
2927/**
8d73f6b8 2928 * usb4_dp_port_requested_bandwidth() - Read requested bandwidth
e3273801
MW
2929 * @port: DP IN adapter
2930 *
2931 * Reads the DPCD (graphics driver) requested bandwidth and returns it
2932 * in Mb/s. Takes the programmed granularity into account. In case of
2933 * error returns negative errno. Specifically returns %-EOPNOTSUPP if
ace75e18
MW
2934 * the adapter does not support bandwidth allocation mode, and %ENODATA
2935 * if there is no active bandwidth request from the graphics driver.
e3273801 2936 */
8d73f6b8 2937int usb4_dp_port_requested_bandwidth(struct tb_port *port)
e3273801
MW
2938{
2939 u32 val, granularity;
2940 int ret;
2941
2942 if (!is_usb4_dpin(port))
2943 return -EOPNOTSUPP;
2944
2945 ret = usb4_dp_port_granularity(port);
2946 if (ret < 0)
2947 return ret;
2948 granularity = ret;
2949
2950 ret = tb_port_read(port, &val, TB_CFG_PORT,
2951 port->cap_adap + ADP_DP_CS_8, 1);
2952 if (ret)
ace75e18 2953 return ret;
e3273801
MW
2954
2955 if (!(val & ADP_DP_CS_8_DR))
ace75e18 2956 return -ENODATA;
e3273801
MW
2957
2958 return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
2959}
6e19d48e
GF
2960
2961/**
2962 * usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation
2963 * @port: PCIe adapter
2964 * @enable: Enable/disable extended encapsulation
2965 *
2966 * Enables or disables extended encapsulation used in PCIe tunneling. Caller
2967 * needs to make sure both adapters support this before enabling. Returns %0 on
2968 * success and negative errno otherwise.
2969 */
2970int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable)
2971{
2972 u32 val;
2973 int ret;
2974
2975 if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port))
2976 return -EINVAL;
2977
2978 ret = tb_port_read(port, &val, TB_CFG_PORT,
2979 port->cap_adap + ADP_PCIE_CS_1, 1);
2980 if (ret)
2981 return ret;
2982
2983 if (enable)
2984 val |= ADP_PCIE_CS_1_EE;
2985 else
2986 val &= ~ADP_PCIE_CS_1_EE;
2987
2988 return tb_port_write(port, &val, TB_CFG_PORT,
2989 port->cap_adap + ADP_PCIE_CS_1, 1);
2990}