Linux 6.5-rc4
[linux-block.git] / drivers / thunderbolt / tmu.c
CommitLineData
cf29b9af
RM
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Thunderbolt Time Management Unit (TMU) support
4 *
5 * Copyright (C) 2019, Intel Corporation
6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7 * Rajmohan Mani <rajmohan.mani@intel.com>
8 */
9
10#include <linux/delay.h>
11
12#include "tb.h"
13
d49b4f04
MW
/*
 * Time-stamp packet interval value written to the TS packet interval
 * field for each TMU mode. 0 turns the TMU off.
 */
static const unsigned int tmu_rates[] = {
	[TB_SWITCH_TMU_MODE_OFF] = 0,
	[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
	[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};
21
22const struct {
23 unsigned int freq_meas_window;
24 unsigned int avg_const;
25 unsigned int delta_avg_const;
26 unsigned int repl_timeout;
27 unsigned int repl_threshold;
28 unsigned int repl_n;
29 unsigned int dirswitch_n;
30} tmu_params[] = {
31 [TB_SWITCH_TMU_MODE_OFF] = { },
32 [TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
33 [TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
34 [TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
35 [TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
36 800, 4, 0, 3125, 25, 128, 255,
37 },
38};
39
40static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
41{
42 switch (mode) {
43 case TB_SWITCH_TMU_MODE_OFF:
44 return "off";
45 case TB_SWITCH_TMU_MODE_LOWRES:
46 return "uni-directional, LowRes";
47 case TB_SWITCH_TMU_MODE_HIFI_UNI:
48 return "uni-directional, HiFi";
49 case TB_SWITCH_TMU_MODE_HIFI_BI:
50 return "bi-directional, HiFi";
51 case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
52 return "enhanced uni-directional, MedRes";
53 default:
54 return "unknown";
55 }
56}
57
/* Enhanced uni-directional mode is supported from USB4 version 2 onwards */
static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
{
	return usb4_switch_version(sw) > 1;
}
62
b017a46d 63static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
d49b4f04 64 enum tb_switch_tmu_mode mode)
b017a46d 65{
b017a46d
GF
66 u32 freq, avg, val;
67 int ret;
68
d49b4f04
MW
69 freq = tmu_params[mode].freq_meas_window;
70 avg = tmu_params[mode].avg_const;
b017a46d
GF
71
72 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
73 sw->tmu.cap + TMU_RTR_CS_0, 1);
74 if (ret)
75 return ret;
76
77 val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
78 val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);
79
80 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
81 sw->tmu.cap + TMU_RTR_CS_0, 1);
82 if (ret)
83 return ret;
84
85 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
86 sw->tmu.cap + TMU_RTR_CS_15, 1);
87 if (ret)
88 return ret;
89
90 val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
91 ~TMU_RTR_CS_15_DELAY_AVG_MASK &
92 ~TMU_RTR_CS_15_OFFSET_AVG_MASK &
93 ~TMU_RTR_CS_15_ERROR_AVG_MASK;
94 val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
95 FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
96 FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
97 FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);
98
d49b4f04
MW
99 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
100 sw->tmu.cap + TMU_RTR_CS_15, 1);
101 if (ret)
102 return ret;
cf29b9af 103
d49b4f04
MW
104 if (tb_switch_tmu_enhanced_is_supported(sw)) {
105 u32 delta_avg = tmu_params[mode].delta_avg_const;
cf29b9af 106
d49b4f04
MW
107 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
108 sw->tmu.cap + TMU_RTR_CS_18, 1);
109 if (ret)
110 return ret;
cf29b9af 111
d49b4f04
MW
112 val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
113 val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);
cf29b9af 114
d49b4f04
MW
115 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
116 sw->tmu.cap + TMU_RTR_CS_18, 1);
cf29b9af 117 }
d49b4f04
MW
118
119 return ret;
cf29b9af
RM
120}
121
d49b4f04 122static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
cf29b9af
RM
123{
124 int ret;
125 u32 val;
126
127 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
128 sw->tmu.cap + TMU_RTR_CS_0, 1);
129 if (ret)
130 return false;
131
132 return !!(val & TMU_RTR_CS_0_UCAP);
133}
134
135static int tb_switch_tmu_rate_read(struct tb_switch *sw)
136{
137 int ret;
138 u32 val;
139
140 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
141 sw->tmu.cap + TMU_RTR_CS_3, 1);
142 if (ret)
143 return ret;
144
145 val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
146 return val;
147}
148
149static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
150{
151 int ret;
152 u32 val;
153
154 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
155 sw->tmu.cap + TMU_RTR_CS_3, 1);
156 if (ret)
157 return ret;
158
159 val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
160 val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
161
162 return tb_sw_write(sw, &val, TB_CFG_SWITCH,
163 sw->tmu.cap + TMU_RTR_CS_3, 1);
164}
165
166static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
167 u32 value)
168{
169 u32 data;
170 int ret;
171
172 ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
173 if (ret)
174 return ret;
175
176 data &= ~mask;
177 data |= value;
178
179 return tb_port_write(port, &data, TB_CFG_PORT,
180 port->cap_tmu + offset, 1);
181}
182
183static int tb_port_tmu_set_unidirectional(struct tb_port *port,
184 bool unidirectional)
185{
186 u32 val;
187
188 if (!port->sw->tmu.has_ucap)
189 return 0;
190
191 val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
192 return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
193}
194
/* Clears the uni-directional mode bit of @port (no-op without ucap) */
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}
199
a28ec0e1
GF
/* Sets the uni-directional mode bit of @port (no-op without ucap) */
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}
204
cf29b9af
RM
205static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
206{
207 int ret;
208 u32 val;
209
210 ret = tb_port_read(port, &val, TB_CFG_PORT,
211 port->cap_tmu + TMU_ADP_CS_3, 1);
212 if (ret)
213 return false;
214
215 return val & TMU_ADP_CS_3_UDM;
216}
217
d49b4f04
MW
218static bool tb_port_tmu_is_enhanced(struct tb_port *port)
219{
220 int ret;
221 u32 val;
222
223 ret = tb_port_read(port, &val, TB_CFG_PORT,
224 port->cap_tmu + TMU_ADP_CS_8, 1);
225 if (ret)
226 return false;
227
228 return val & TMU_ADP_CS_8_EUDM;
229}
230
231/* Can be called to non-v2 lane adapters too */
232static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
233{
234 int ret;
235 u32 val;
236
237 if (!tb_switch_tmu_enhanced_is_supported(port->sw))
238 return 0;
239
240 ret = tb_port_read(port, &val, TB_CFG_PORT,
241 port->cap_tmu + TMU_ADP_CS_8, 1);
242 if (ret)
243 return ret;
244
245 if (enable)
246 val |= TMU_ADP_CS_8_EUDM;
247 else
248 val &= ~TMU_ADP_CS_8_EUDM;
249
250 return tb_port_write(port, &val, TB_CFG_PORT,
251 port->cap_tmu + TMU_ADP_CS_8, 1);
252}
253
254static int tb_port_set_tmu_mode_params(struct tb_port *port,
255 enum tb_switch_tmu_mode mode)
256{
257 u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
258 int ret;
259
260 repl_timeout = tmu_params[mode].repl_timeout;
261 repl_threshold = tmu_params[mode].repl_threshold;
262 repl_n = tmu_params[mode].repl_n;
263 dirswitch_n = tmu_params[mode].dirswitch_n;
264
265 ret = tb_port_read(port, &val, TB_CFG_PORT,
266 port->cap_tmu + TMU_ADP_CS_8, 1);
267 if (ret)
268 return ret;
269
270 val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
271 val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
272 val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
273 val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);
274
275 ret = tb_port_write(port, &val, TB_CFG_PORT,
276 port->cap_tmu + TMU_ADP_CS_8, 1);
277 if (ret)
278 return ret;
279
280 ret = tb_port_read(port, &val, TB_CFG_PORT,
281 port->cap_tmu + TMU_ADP_CS_9, 1);
282 if (ret)
283 return ret;
284
285 val &= ~TMU_ADP_CS_9_REPL_N_MASK;
286 val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
287 val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
288 val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);
289
290 return tb_port_write(port, &val, TB_CFG_PORT,
291 port->cap_tmu + TMU_ADP_CS_9, 1);
292}
293
294/* Can be called to non-v2 lane adapters too */
295static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
296{
297 int ret;
298 u32 val;
299
300 if (!tb_switch_tmu_enhanced_is_supported(port->sw))
301 return 0;
302
303 ret = tb_port_read(port, &val, TB_CFG_PORT,
304 port->cap_tmu + TMU_ADP_CS_9, 1);
305 if (ret)
306 return ret;
307
308 val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
309 val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);
310
311 return tb_port_write(port, &val, TB_CFG_PORT,
312 port->cap_tmu + TMU_ADP_CS_9, 1);
313}
314
a28ec0e1
GF
/* Sets (@time_sync == true) or clears the TMU_ADP_CS_6_DTS bit of @port */
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}
321
/*
 * NOTE(review): the inversion is intentional — DTS appears to be a
 * "disable time sync" bit, so disabling sets it (confirm against the
 * adapter register definitions in tb_regs.h / USB4 spec).
 */
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}
326
/* Clears the DTS bit, (re-)enabling time sync on this adapter */
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}
331
cf29b9af
RM
/*
 * Sets or clears the time disruption (TD) bit that tells the router
 * time synchronization is about to be disturbed. USB4 routers have the
 * bit in the TMU router capability; other routers use a vendor
 * specific register instead.
 */
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	u32 val, offset, bit;
	int ret;

	if (tb_switch_is_usb4(sw)) {
		offset = sw->tmu.cap + TMU_RTR_CS_0;
		bit = TMU_RTR_CS_0_TD;
	} else {
		/* Pre-USB4: vendor specific TMU capability location */
		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
		bit = TB_TIME_VSEC_3_CS_26_TD;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (set)
		val |= bit;
	else
		val &= ~bit;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
356
d49b4f04
MW
357static int tmu_mode_init(struct tb_switch *sw)
358{
359 bool enhanced, ucap;
360 int ret, rate;
361
362 ucap = tb_switch_tmu_ucap_is_supported(sw);
363 if (ucap)
364 tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
365 enhanced = tb_switch_tmu_enhanced_is_supported(sw);
366 if (enhanced)
367 tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");
368
369 ret = tb_switch_tmu_rate_read(sw);
370 if (ret < 0)
371 return ret;
372 rate = ret;
373
374 /* Off by default */
375 sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;
376
377 if (tb_route(sw)) {
378 struct tb_port *up = tb_upstream_port(sw);
379
380 if (enhanced && tb_port_tmu_is_enhanced(up)) {
381 sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
382 } else if (ucap && tb_port_tmu_is_unidirectional(up)) {
383 if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
384 sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
385 else if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
386 sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
387 } else if (rate) {
388 sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
389 }
390 } else if (rate) {
391 sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
392 }
393
394 /* Update the initial request to match the current mode */
395 sw->tmu.mode_request = sw->tmu.mode;
396 sw->tmu.has_ucap = ucap;
397
398 return 0;
399}
400
cf29b9af
RM
/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* With ICM the firmware manages the TMU, nothing to do here */
	if (tb_switch_is_icm(sw))
		return 0;

	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	/* Cache the TMU adapter capability offset for each port */
	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	ret = tmu_mode_init(sw);
	if (ret)
		return ret;

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
	return 0;
}
436
/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using time posting procedure. The
 * grandmaster time is read from the root router and posted to @sw.
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	/* Only device routers (with a route) need their time posted */
	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 * Post Time High register.
	 * 2) write 0 to Post Time High register and then wait for
	 * the completion of the post_time register becomes 0.
	 * This means the time has been converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	/* Poll until the hardware clears the post time (max ~1 ms) */
	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	/* Always clear the disruption bit, even on failure */
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}
535
d49b4f04
MW
536static int disable_enhanced(struct tb_port *up, struct tb_port *down)
537{
538 int ret;
539
540 /*
541 * Router may already been disconnected so ignore errors on the
542 * upstream port.
543 */
544 tb_port_tmu_rate_write(up, 0);
545 tb_port_tmu_enhanced_enable(up, false);
546
547 ret = tb_port_tmu_rate_write(down, 0);
548 if (ret)
549 return ret;
550 return tb_port_tmu_enhanced_enable(down, false);
551}
552
cf29b9af
RM
/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/* Already disabled? */
	if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
		return 0;

	if (tb_route(sw)) {
		struct tb_port *down, *up;
		int ret;

		down = tb_switch_downstream_port(sw);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, TMU handshake is
		 * initiated by upstream router. In case of bi-directional
		 * time sync, TMU handshake is initiated by downstream router.
		 * We change downstream router's rate to off for both uni/bidir
		 * cases although it is needed only for the bi-directional mode.
		 * We avoid changing upstream router's mode since it might
		 * have another downstream router plugged, that is set to
		 * uni-directional mode and we don't want to change it's TMU
		 * mode.
		 */
		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);

		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		/* Per-mode teardown of the lane adapters */
		switch (sw->tmu.mode) {
		case TB_SWITCH_TMU_MODE_LOWRES:
		case TB_SWITCH_TMU_MODE_HIFI_UNI:
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
			break;

		case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
			ret = disable_enhanced(up, down);
			if (ret)
				return ret;
			break;

		default:
			break;
		}
	} else {
		/* Host router: only its own rate needs turning off */
		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
	}

	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}
617
d49b4f04
MW
/* Called only when there is failure enabling requested mode */
static void tb_switch_tmu_off(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);

	/* Undo the mode specific configuration that was requested */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional: the parent router drives the rate */
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;
	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		disable_enhanced(up, down);
		break;
	default:
		break;
	}

	/* Always set the rate to 0 */
	tb_switch_tmu_rate_write(sw, rate);

	/* Restore the parameters of the previously active mode */
	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
655
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Take both lane adapters out of uni-directional mode first */
	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	/* In bi-directional mode this router drives the rate itself */
	ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Roll everything back to the off configuration */
	tb_switch_tmu_off(sw);
	return ret;
}
694
701e73a8
MW
/* Only needed for Titan Ridge */
static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	/* Clear the TMU objection bits in the vendor specific capability */
	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	/* Set the CL1/CL2 disable-objection bits on the upstream adapter */
	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
}
719
a28ec0e1
GF
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* In uni-directional mode the parent router drives the rate */
	ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
				       tmu_rates[sw->tmu.mode_request]);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	/* Upstream adapter first: uni-directional mode, then time sync */
	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	/* Then the same on the downstream facing adapter */
	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Roll everything back to the off configuration */
	tb_switch_tmu_off(sw);
	return ret;
}
762
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	/* Router specific parameters first */
	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Upstream lane adapter: params, rate, then enhanced mode */
	ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(up, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(up, true);
	if (ret)
		goto out;

	/* Same sequence for the downstream facing lane adapter */
	ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(down, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(down, true);
	if (ret)
		goto out;

	return 0;

out:
	/* Roll everything back to the off configuration */
	tb_switch_tmu_off(sw);
	return ret;
}
811
/*
 * Reverts the TMU configuration back to the previously active mode
 * (sw->tmu.mode) after a failed mode change.
 */
static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when change mode,
	 * get back to the TMU configurations in previous mode.
	 * In case of additional failures in the functions below,
	 * ignore them since the caller shall already report a failure.
	 */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional: parent router drives the rate */
		tb_port_tmu_set_unidirectional(down, true);
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		/* Bi-directional: this router drives the rate itself */
		tb_port_tmu_set_unidirectional(down, false);
		tb_switch_tmu_rate_write(sw, rate);
		break;

	default:
		break;
	}

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);

	/* Restore the upstream adapter direction setting as well */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(up, true);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(up, false);
		break;

	default:
		break;
	}
}
857
/*
 * Changes the TMU mode of @sw from the currently enabled mode
 * (sw->tmu.mode) to the requested one (sw->tmu.mode_request) without
 * going through the off state first. Only transitions between the
 * LowRes/HiFi uni-directional and HiFi bi-directional modes are
 * allowed.
 */
static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Program the upstream router downstream facing lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(down, true);
		if (ret)
			goto out;
		/* Uni-directional: parent router drives the rate */
		ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(down, false);
		if (ret)
			goto out;
		/* Bi-directional: this router drives the rate itself */
		ret = tb_switch_tmu_rate_write(sw, rate);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	/* Program the new mode and the downstream router lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(up, true);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(up, false);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	/* Restore the previously active mode configuration on failure */
	tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
931
/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables TMU of a router to be in uni-directional Normal/HiFi,
 * bi-directional HiFi, or enhanced uni-directional MedRes mode.
 * Calling tb_switch_tmu_configure() is required before calling this
 * function.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	int ret;

	/* Nothing to do if the requested mode is already active */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	/* Titan Ridge needs TMU objections cleared for uni-directional */
	if (tb_switch_is_titan_ridge(sw) &&
	    (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
	     sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
		ret = tb_switch_tmu_disable_objections(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/*
		 * The used mode changes are from OFF to
		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
		 * HiFi-Uni.
		 */
		if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
			switch (sw->tmu.mode_request) {
			case TB_SWITCH_TMU_MODE_LOWRES:
			case TB_SWITCH_TMU_MODE_HIFI_UNI:
				ret = tb_switch_tmu_enable_unidirectional(sw);
				break;

			case TB_SWITCH_TMU_MODE_HIFI_BI:
				ret = tb_switch_tmu_enable_bidirectional(sw);
				break;
			case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
				ret = tb_switch_tmu_enable_enhanced(sw);
				break;
			default:
				ret = -EINVAL;
				break;
			}
		} else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
			ret = tb_switch_tmu_change_mode(sw);
		} else {
			/* No path from the enhanced mode without going off */
			ret = -EINVAL;
		}
	} else {
		/*
		 * Host router port configurations are written as
		 * part of configurations for downstream port of the parent
		 * of the child node - see above.
		 * Here only the host router' rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
	}

	if (ret) {
		tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
			   tmu_mode_name(sw->tmu.mode_request), ret);
	} else {
		sw->tmu.mode = sw->tmu.mode_request;
		tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
	}

	return tb_switch_tmu_set_time_disruption(sw, false);
}
a28ec0e1 1009
/**
 * tb_switch_tmu_configure() - Configure the TMU mode
 * @sw: Router whose mode to change
 * @mode: Mode to configure
 *
 * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
 * next called.
 *
 * Returns %0 in success and negative errno otherwise. Specifically
 * returns %-EOPNOTSUPP if the requested mode is not possible (not
 * supported by the router and/or topology).
 */
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
{
	/* Validate the requested mode against router capabilities */
	switch (mode) {
	case TB_SWITCH_TMU_MODE_OFF:
		break;

	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional modes need the ucap capability */
		if (!sw->tmu.has_ucap)
			return -EOPNOTSUPP;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		break;

	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
		const struct tb_switch *parent_sw = tb_switch_parent(sw);

		/* Both this router and its parent must be USB4 v2 */
		if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
			return -EOPNOTSUPP;
		if (!tb_switch_tmu_enhanced_is_supported(sw))
			return -EOPNOTSUPP;

		break;
	}

	default:
		tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
		return -EINVAL;
	}

	/* Record the request; it takes effect in tb_switch_tmu_enable() */
	if (sw->tmu.mode_request != mode) {
		tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
			  tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
		sw->tmu.mode_request = mode;
	}

	return 0;
}