thunderbolt: Make PCIe tunnel setup and teardown follow CM guide
[linux-block.git] / drivers / thunderbolt / tunnel.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - Tunneling support
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/list.h>
12 #include <linux/ktime.h>
13 #include <linux/string_helpers.h>
14
15 #include "tunnel.h"
16 #include "tb.h"
17
18 /* PCIe adapters use always HopID of 8 for both directions */
19 #define TB_PCI_HOPID                    8
20
21 #define TB_PCI_PATH_DOWN                0
22 #define TB_PCI_PATH_UP                  1
23
24 #define TB_PCI_PRIORITY                 3
25 #define TB_PCI_WEIGHT                   1
26
27 /* USB3 adapters use always HopID of 8 for both directions */
28 #define TB_USB3_HOPID                   8
29
30 #define TB_USB3_PATH_DOWN               0
31 #define TB_USB3_PATH_UP                 1
32
33 #define TB_USB3_PRIORITY                3
34 #define TB_USB3_WEIGHT                  2
35
36 /* DP adapters use HopID 8 for AUX and 9 for Video */
37 #define TB_DP_AUX_TX_HOPID              8
38 #define TB_DP_AUX_RX_HOPID              8
39 #define TB_DP_VIDEO_HOPID               9
40
41 #define TB_DP_VIDEO_PATH_OUT            0
42 #define TB_DP_AUX_PATH_OUT              1
43 #define TB_DP_AUX_PATH_IN               2
44
45 #define TB_DP_VIDEO_PRIORITY            1
46 #define TB_DP_VIDEO_WEIGHT              1
47
48 #define TB_DP_AUX_PRIORITY              2
49 #define TB_DP_AUX_WEIGHT                1
50
51 /* Minimum number of credits needed for PCIe path */
52 #define TB_MIN_PCIE_CREDITS             6U
53 /*
54  * Number of credits we try to allocate for each DMA path if not limited
55  * by the host router baMaxHI.
56  */
57 #define TB_DMA_CREDITS                  14
58 /* Minimum number of credits for DMA path */
59 #define TB_MIN_DMA_CREDITS              1
60
61 #define TB_DMA_PRIORITY                 5
62 #define TB_DMA_WEIGHT                   1
63
64 /*
65  * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
66  * according to USB4 v2 Connection Manager guide. This ends up reserving
67  * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
68  * account.
69  */
70 #define USB4_V2_PCI_MIN_BANDWIDTH       (1500 * TB_PCI_WEIGHT)
71 #define USB4_V2_USB3_MIN_BANDWIDTH      (1500 * TB_USB3_WEIGHT)
72
73 static unsigned int dma_credits = TB_DMA_CREDITS;
74 module_param(dma_credits, uint, 0444);
75 MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
76                 __MODULE_STRING(TB_DMA_CREDITS) ")");
77
78 static bool bw_alloc_mode = true;
79 module_param(bw_alloc_mode, bool, 0444);
80 MODULE_PARM_DESC(bw_alloc_mode,
81                  "enable bandwidth allocation mode if supported (default: true)");
82
83 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
84
85 static inline unsigned int tb_usable_credits(const struct tb_port *port)
86 {
87         return port->total_credits - port->ctl_credits;
88 }
89
90 /**
91  * tb_available_credits() - Available credits for PCIe and DMA
92  * @port: Lane adapter to check
93  * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
94  *                  streams possible through this lane adapter
95  */
96 static unsigned int tb_available_credits(const struct tb_port *port,
97                                          size_t *max_dp_streams)
98 {
99         const struct tb_switch *sw = port->sw;
100         int credits, usb3, pcie, spare;
101         size_t ndp;
102
103         usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
104         pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
105
106         if (tb_acpi_is_xdomain_allowed()) {
107                 spare = min_not_zero(sw->max_dma_credits, dma_credits);
108                 /* Add some credits for potential second DMA tunnel */
109                 spare += TB_MIN_DMA_CREDITS;
110         } else {
111                 spare = 0;
112         }
113
114         credits = tb_usable_credits(port);
115         if (tb_acpi_may_tunnel_dp()) {
116                 /*
117                  * Maximum number of DP streams possible through the
118                  * lane adapter.
119                  */
120                 if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
121                         ndp = (credits - (usb3 + pcie + spare)) /
122                               (sw->min_dp_aux_credits + sw->min_dp_main_credits);
123                 else
124                         ndp = 0;
125         } else {
126                 ndp = 0;
127         }
128         credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
129         credits -= usb3;
130
131         if (max_dp_streams)
132                 *max_dp_streams = ndp;
133
134         return credits > 0 ? credits : 0;
135 }
136
137 static void tb_init_pm_support(struct tb_path_hop *hop)
138 {
139         struct tb_port *out_port = hop->out_port;
140         struct tb_port *in_port = hop->in_port;
141
142         if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
143             usb4_switch_version(in_port->sw) >= 2)
144                 hop->pm_support = true;
145 }
146
147 static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
148                                          enum tb_tunnel_type type)
149 {
150         struct tb_tunnel *tunnel;
151
152         tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
153         if (!tunnel)
154                 return NULL;
155
156         tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
157         if (!tunnel->paths) {
158                 tb_tunnel_free(tunnel);
159                 return NULL;
160         }
161
162         INIT_LIST_HEAD(&tunnel->list);
163         tunnel->tb = tb;
164         tunnel->npaths = npaths;
165         tunnel->type = type;
166
167         return tunnel;
168 }
169
170 static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
171 {
172         struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
173         int ret;
174
175         /* Only supported of both routers are at least USB4 v2 */
176         if (tb_port_get_link_generation(port) < 4)
177                 return 0;
178
179         ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
180         if (ret)
181                 return ret;
182
183         ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
184         if (ret)
185                 return ret;
186
187         tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
188                       str_enabled_disabled(enable));
189         return 0;
190 }
191
192 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
193 {
194         int res;
195
196         if (activate) {
197                 res = tb_pci_set_ext_encapsulation(tunnel, activate);
198                 if (res)
199                         return res;
200         }
201
202         if (activate)
203                 res = tb_pci_port_enable(tunnel->dst_port, activate);
204         else
205                 res = tb_pci_port_enable(tunnel->src_port, activate);
206         if (res)
207                 return res;
208
209
210         if (activate) {
211                 res = tb_pci_port_enable(tunnel->src_port, activate);
212                 if (res)
213                         return res;
214         } else {
215                 /* Downstream router could be unplugged */
216                 tb_pci_port_enable(tunnel->dst_port, activate);
217         }
218
219         return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
220 }
221
222 static int tb_pci_init_credits(struct tb_path_hop *hop)
223 {
224         struct tb_port *port = hop->in_port;
225         struct tb_switch *sw = port->sw;
226         unsigned int credits;
227
228         if (tb_port_use_credit_allocation(port)) {
229                 unsigned int available;
230
231                 available = tb_available_credits(port, NULL);
232                 credits = min(sw->max_pcie_credits, available);
233
234                 if (credits < TB_MIN_PCIE_CREDITS)
235                         return -ENOSPC;
236
237                 credits = max(TB_MIN_PCIE_CREDITS, credits);
238         } else {
239                 if (tb_port_is_null(port))
240                         credits = port->bonded ? 32 : 16;
241                 else
242                         credits = 7;
243         }
244
245         hop->initial_credits = credits;
246         return 0;
247 }
248
249 static int tb_pci_init_path(struct tb_path *path)
250 {
251         struct tb_path_hop *hop;
252
253         path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
254         path->egress_shared_buffer = TB_PATH_NONE;
255         path->ingress_fc_enable = TB_PATH_ALL;
256         path->ingress_shared_buffer = TB_PATH_NONE;
257         path->priority = TB_PCI_PRIORITY;
258         path->weight = TB_PCI_WEIGHT;
259         path->drop_packages = 0;
260
261         tb_path_for_each_hop(path, hop) {
262                 int ret;
263
264                 ret = tb_pci_init_credits(hop);
265                 if (ret)
266                         return ret;
267         }
268
269         return 0;
270 }
271
272 /**
273  * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
274  * @tb: Pointer to the domain structure
275  * @down: PCIe downstream adapter
276  * @alloc_hopid: Allocate HopIDs from visited ports
277  *
278  * If @down adapter is active, follows the tunnel to the PCIe upstream
279  * adapter and back. Returns the discovered tunnel or %NULL if there was
280  * no tunnel.
281  */
282 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
283                                          bool alloc_hopid)
284 {
285         struct tb_tunnel *tunnel;
286         struct tb_path *path;
287
288         if (!tb_pci_port_is_enabled(down))
289                 return NULL;
290
291         tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
292         if (!tunnel)
293                 return NULL;
294
295         tunnel->activate = tb_pci_activate;
296         tunnel->src_port = down;
297
298         /*
299          * Discover both paths even if they are not complete. We will
300          * clean them up by calling tb_tunnel_deactivate() below in that
301          * case.
302          */
303         path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
304                                 &tunnel->dst_port, "PCIe Up", alloc_hopid);
305         if (!path) {
306                 /* Just disable the downstream port */
307                 tb_pci_port_enable(down, false);
308                 goto err_free;
309         }
310         tunnel->paths[TB_PCI_PATH_UP] = path;
311         if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
312                 goto err_free;
313
314         path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
315                                 "PCIe Down", alloc_hopid);
316         if (!path)
317                 goto err_deactivate;
318         tunnel->paths[TB_PCI_PATH_DOWN] = path;
319         if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
320                 goto err_deactivate;
321
322         /* Validate that the tunnel is complete */
323         if (!tb_port_is_pcie_up(tunnel->dst_port)) {
324                 tb_port_warn(tunnel->dst_port,
325                              "path does not end on a PCIe adapter, cleaning up\n");
326                 goto err_deactivate;
327         }
328
329         if (down != tunnel->src_port) {
330                 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
331                 goto err_deactivate;
332         }
333
334         if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
335                 tb_tunnel_warn(tunnel,
336                                "tunnel is not fully activated, cleaning up\n");
337                 goto err_deactivate;
338         }
339
340         tb_tunnel_dbg(tunnel, "discovered\n");
341         return tunnel;
342
343 err_deactivate:
344         tb_tunnel_deactivate(tunnel);
345 err_free:
346         tb_tunnel_free(tunnel);
347
348         return NULL;
349 }
350
351 /**
352  * tb_tunnel_alloc_pci() - allocate a pci tunnel
353  * @tb: Pointer to the domain structure
354  * @up: PCIe upstream adapter port
355  * @down: PCIe downstream adapter port
356  *
357  * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
358  * TB_TYPE_PCIE_DOWN.
359  *
360  * Return: Returns a tb_tunnel on success or NULL on failure.
361  */
362 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
363                                       struct tb_port *down)
364 {
365         struct tb_tunnel *tunnel;
366         struct tb_path *path;
367
368         tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
369         if (!tunnel)
370                 return NULL;
371
372         tunnel->activate = tb_pci_activate;
373         tunnel->src_port = down;
374         tunnel->dst_port = up;
375
376         path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
377                              "PCIe Down");
378         if (!path)
379                 goto err_free;
380         tunnel->paths[TB_PCI_PATH_DOWN] = path;
381         if (tb_pci_init_path(path))
382                 goto err_free;
383
384         path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
385                              "PCIe Up");
386         if (!path)
387                 goto err_free;
388         tunnel->paths[TB_PCI_PATH_UP] = path;
389         if (tb_pci_init_path(path))
390                 goto err_free;
391
392         return tunnel;
393
394 err_free:
395         tb_tunnel_free(tunnel);
396         return NULL;
397 }
398
399 /**
400  * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
401  * @port: Lane 0 adapter
402  * @reserved_up: Upstream bandwidth in Mb/s to reserve
403  * @reserved_down: Downstream bandwidth in Mb/s to reserve
404  *
405  * Can be called to any connected lane 0 adapter to find out how much
406  * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
407  * Returns true if there is something to be reserved and writes the
408  * amount to @reserved_down/@reserved_up. Otherwise returns false and
409  * does not touch the parameters.
410  */
411 bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
412                             int *reserved_down)
413 {
414         if (WARN_ON_ONCE(!port->remote))
415                 return false;
416
417         if (!tb_acpi_may_tunnel_pcie())
418                 return false;
419
420         if (tb_port_get_link_generation(port) < 4)
421                 return false;
422
423         /* Must have PCIe adapters */
424         if (tb_is_upstream_port(port)) {
425                 if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
426                         return false;
427                 if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
428                         return false;
429         } else {
430                 if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
431                         return false;
432                 if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
433                         return false;
434         }
435
436         *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
437         *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
438
439         tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
440                     *reserved_down);
441         return true;
442 }
443
444 static bool tb_dp_is_usb4(const struct tb_switch *sw)
445 {
446         /* Titan Ridge DP adapters need the same treatment as USB4 */
447         return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
448 }
449
450 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
451                               int timeout_msec)
452 {
453         ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
454         u32 val;
455         int ret;
456
457         /* Both ends need to support this */
458         if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
459                 return 0;
460
461         ret = tb_port_read(out, &val, TB_CFG_PORT,
462                            out->cap_adap + DP_STATUS_CTRL, 1);
463         if (ret)
464                 return ret;
465
466         val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
467
468         ret = tb_port_write(out, &val, TB_CFG_PORT,
469                             out->cap_adap + DP_STATUS_CTRL, 1);
470         if (ret)
471                 return ret;
472
473         do {
474                 ret = tb_port_read(out, &val, TB_CFG_PORT,
475                                    out->cap_adap + DP_STATUS_CTRL, 1);
476                 if (ret)
477                         return ret;
478                 if (!(val & DP_STATUS_CTRL_CMHS))
479                         return 0;
480                 usleep_range(100, 150);
481         } while (ktime_before(ktime_get(), timeout));
482
483         return -ETIMEDOUT;
484 }
485
486 /*
487  * Returns maximum possible rate from capability supporting only DP 2.0
488  * and below. Used when DP BW allocation mode is not enabled.
489  */
490 static inline u32 tb_dp_cap_get_rate(u32 val)
491 {
492         u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
493
494         switch (rate) {
495         case DP_COMMON_CAP_RATE_RBR:
496                 return 1620;
497         case DP_COMMON_CAP_RATE_HBR:
498                 return 2700;
499         case DP_COMMON_CAP_RATE_HBR2:
500                 return 5400;
501         case DP_COMMON_CAP_RATE_HBR3:
502                 return 8100;
503         default:
504                 return 0;
505         }
506 }
507
508 /*
509  * Returns maximum possible rate from capability supporting DP 2.1
510  * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
511  * mode is enabled.
512  */
513 static inline u32 tb_dp_cap_get_rate_ext(u32 val)
514 {
515         if (val & DP_COMMON_CAP_UHBR20)
516                 return 20000;
517         else if (val & DP_COMMON_CAP_UHBR13_5)
518                 return 13500;
519         else if (val & DP_COMMON_CAP_UHBR10)
520                 return 10000;
521
522         return tb_dp_cap_get_rate(val);
523 }
524
525 static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
526 {
527         return rate >= 10000;
528 }
529
530 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
531 {
532         val &= ~DP_COMMON_CAP_RATE_MASK;
533         switch (rate) {
534         default:
535                 WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
536                 fallthrough;
537         case 1620:
538                 val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
539                 break;
540         case 2700:
541                 val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
542                 break;
543         case 5400:
544                 val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
545                 break;
546         case 8100:
547                 val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
548                 break;
549         }
550         return val;
551 }
552
553 static inline u32 tb_dp_cap_get_lanes(u32 val)
554 {
555         u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
556
557         switch (lanes) {
558         case DP_COMMON_CAP_1_LANE:
559                 return 1;
560         case DP_COMMON_CAP_2_LANES:
561                 return 2;
562         case DP_COMMON_CAP_4_LANES:
563                 return 4;
564         default:
565                 return 0;
566         }
567 }
568
569 static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
570 {
571         val &= ~DP_COMMON_CAP_LANES_MASK;
572         switch (lanes) {
573         default:
574                 WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
575                      lanes);
576                 fallthrough;
577         case 1:
578                 val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
579                 break;
580         case 2:
581                 val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
582                 break;
583         case 4:
584                 val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
585                 break;
586         }
587         return val;
588 }
589
590 static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
591 {
592         /* Tunneling removes the DP 8b/10b 128/132b encoding */
593         if (tb_dp_is_uhbr_rate(rate))
594                 return rate * lanes * 128 / 132;
595         return rate * lanes * 8 / 10;
596 }
597
598 static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
599                                   u32 out_rate, u32 out_lanes, u32 *new_rate,
600                                   u32 *new_lanes)
601 {
602         static const u32 dp_bw[][2] = {
603                 /* Mb/s, lanes */
604                 { 8100, 4 }, /* 25920 Mb/s */
605                 { 5400, 4 }, /* 17280 Mb/s */
606                 { 8100, 2 }, /* 12960 Mb/s */
607                 { 2700, 4 }, /* 8640 Mb/s */
608                 { 5400, 2 }, /* 8640 Mb/s */
609                 { 8100, 1 }, /* 6480 Mb/s */
610                 { 1620, 4 }, /* 5184 Mb/s */
611                 { 5400, 1 }, /* 4320 Mb/s */
612                 { 2700, 2 }, /* 4320 Mb/s */
613                 { 1620, 2 }, /* 2592 Mb/s */
614                 { 2700, 1 }, /* 2160 Mb/s */
615                 { 1620, 1 }, /* 1296 Mb/s */
616         };
617         unsigned int i;
618
619         /*
620          * Find a combination that can fit into max_bw and does not
621          * exceed the maximum rate and lanes supported by the DP OUT and
622          * DP IN adapters.
623          */
624         for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
625                 if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
626                         continue;
627
628                 if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
629                         continue;
630
631                 if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
632                         *new_rate = dp_bw[i][0];
633                         *new_lanes = dp_bw[i][1];
634                         return 0;
635                 }
636         }
637
638         return -ENOSR;
639 }
640
641 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
642 {
643         u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
644         struct tb_port *out = tunnel->dst_port;
645         struct tb_port *in = tunnel->src_port;
646         int ret, max_bw;
647
648         /*
649          * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
650          * newer generation hardware.
651          */
652         if (in->sw->generation < 2 || out->sw->generation < 2)
653                 return 0;
654
655         /*
656          * Perform connection manager handshake between IN and OUT ports
657          * before capabilities exchange can take place.
658          */
659         ret = tb_dp_cm_handshake(in, out, 3000);
660         if (ret)
661                 return ret;
662
663         /* Read both DP_LOCAL_CAP registers */
664         ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
665                            in->cap_adap + DP_LOCAL_CAP, 1);
666         if (ret)
667                 return ret;
668
669         ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
670                            out->cap_adap + DP_LOCAL_CAP, 1);
671         if (ret)
672                 return ret;
673
674         /* Write IN local caps to OUT remote caps */
675         ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
676                             out->cap_adap + DP_REMOTE_CAP, 1);
677         if (ret)
678                 return ret;
679
680         in_rate = tb_dp_cap_get_rate(in_dp_cap);
681         in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
682         tb_tunnel_dbg(tunnel,
683                       "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
684                       in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
685
686         /*
687          * If the tunnel bandwidth is limited (max_bw is set) then see
688          * if we need to reduce bandwidth to fit there.
689          */
690         out_rate = tb_dp_cap_get_rate(out_dp_cap);
691         out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
692         bw = tb_dp_bandwidth(out_rate, out_lanes);
693         tb_tunnel_dbg(tunnel,
694                       "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
695                       out_rate, out_lanes, bw);
696
697         if (tb_port_path_direction_downstream(in, out))
698                 max_bw = tunnel->max_down;
699         else
700                 max_bw = tunnel->max_up;
701
702         if (max_bw && bw > max_bw) {
703                 u32 new_rate, new_lanes, new_bw;
704
705                 ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
706                                              out_rate, out_lanes, &new_rate,
707                                              &new_lanes);
708                 if (ret) {
709                         tb_tunnel_info(tunnel, "not enough bandwidth\n");
710                         return ret;
711                 }
712
713                 new_bw = tb_dp_bandwidth(new_rate, new_lanes);
714                 tb_tunnel_dbg(tunnel,
715                               "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
716                               new_rate, new_lanes, new_bw);
717
718                 /*
719                  * Set new rate and number of lanes before writing it to
720                  * the IN port remote caps.
721                  */
722                 out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
723                 out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
724         }
725
726         /*
727          * Titan Ridge does not disable AUX timers when it gets
728          * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
729          * DP tunneling.
730          */
731         if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
732                 out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
733                 tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
734         }
735
736         return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
737                              in->cap_adap + DP_REMOTE_CAP, 1);
738 }
739
740 static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
741 {
742         int ret, estimated_bw, granularity, tmp;
743         struct tb_port *out = tunnel->dst_port;
744         struct tb_port *in = tunnel->src_port;
745         u32 out_dp_cap, out_rate, out_lanes;
746         u32 in_dp_cap, in_rate, in_lanes;
747         u32 rate, lanes;
748
749         if (!bw_alloc_mode)
750                 return 0;
751
752         ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
753         if (ret)
754                 return ret;
755
756         ret = usb4_dp_port_set_group_id(in, in->group->index);
757         if (ret)
758                 return ret;
759
760         /*
761          * Get the non-reduced rate and lanes based on the lowest
762          * capability of both adapters.
763          */
764         ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
765                            in->cap_adap + DP_LOCAL_CAP, 1);
766         if (ret)
767                 return ret;
768
769         ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
770                            out->cap_adap + DP_LOCAL_CAP, 1);
771         if (ret)
772                 return ret;
773
774         in_rate = tb_dp_cap_get_rate(in_dp_cap);
775         in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
776         out_rate = tb_dp_cap_get_rate(out_dp_cap);
777         out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
778
779         rate = min(in_rate, out_rate);
780         lanes = min(in_lanes, out_lanes);
781         tmp = tb_dp_bandwidth(rate, lanes);
782
783         tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
784                       rate, lanes, tmp);
785
786         ret = usb4_dp_port_set_nrd(in, rate, lanes);
787         if (ret)
788                 return ret;
789
790         /*
791          * Pick up granularity that supports maximum possible bandwidth.
792          * For that we use the UHBR rates too.
793          */
794         in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
795         out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
796         rate = min(in_rate, out_rate);
797         tmp = tb_dp_bandwidth(rate, lanes);
798
799         tb_tunnel_dbg(tunnel,
800                       "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
801                       rate, lanes, tmp);
802
803         for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
804              granularity *= 2)
805                 ;
806
807         tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
808
809         /*
810          * Returns -EINVAL if granularity above is outside of the
811          * accepted ranges.
812          */
813         ret = usb4_dp_port_set_granularity(in, granularity);
814         if (ret)
815                 return ret;
816
817         /*
818          * Bandwidth estimation is pretty much what we have in
819          * max_up/down fields. For discovery we just read what the
820          * estimation was set to.
821          */
822         if (tb_port_path_direction_downstream(in, out))
823                 estimated_bw = tunnel->max_down;
824         else
825                 estimated_bw = tunnel->max_up;
826
827         tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
828
829         ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
830         if (ret)
831                 return ret;
832
833         /* Initial allocation should be 0 according the spec */
834         ret = usb4_dp_port_allocate_bandwidth(in, 0);
835         if (ret)
836                 return ret;
837
838         tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
839         return 0;
840 }
841
842 static int tb_dp_init(struct tb_tunnel *tunnel)
843 {
844         struct tb_port *in = tunnel->src_port;
845         struct tb_switch *sw = in->sw;
846         struct tb *tb = in->sw->tb;
847         int ret;
848
849         ret = tb_dp_xchg_caps(tunnel);
850         if (ret)
851                 return ret;
852
853         if (!tb_switch_is_usb4(sw))
854                 return 0;
855
856         if (!usb4_dp_port_bandwidth_mode_supported(in))
857                 return 0;
858
859         tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
860
861         ret = usb4_dp_port_set_cm_id(in, tb->index);
862         if (ret)
863                 return ret;
864
865         return tb_dp_bandwidth_alloc_mode_enable(tunnel);
866 }
867
868 static void tb_dp_deinit(struct tb_tunnel *tunnel)
869 {
870         struct tb_port *in = tunnel->src_port;
871
872         if (!usb4_dp_port_bandwidth_mode_supported(in))
873                 return;
874         if (usb4_dp_port_bandwidth_mode_enabled(in)) {
875                 usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
876                 tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
877         }
878 }
879
880 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
881 {
882         int ret;
883
884         if (active) {
885                 struct tb_path **paths;
886                 int last;
887
888                 paths = tunnel->paths;
889                 last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
890
891                 tb_dp_port_set_hops(tunnel->src_port,
892                         paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
893                         paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
894                         paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
895
896                 tb_dp_port_set_hops(tunnel->dst_port,
897                         paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
898                         paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
899                         paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
900         } else {
901                 tb_dp_port_hpd_clear(tunnel->src_port);
902                 tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
903                 if (tb_port_is_dpout(tunnel->dst_port))
904                         tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
905         }
906
907         ret = tb_dp_port_enable(tunnel->src_port, active);
908         if (ret)
909                 return ret;
910
911         if (tb_port_is_dpout(tunnel->dst_port))
912                 return tb_dp_port_enable(tunnel->dst_port, active);
913
914         return 0;
915 }
916
917 /* max_bw is rounded up to next granularity */
918 static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
919                                                   int *max_bw)
920 {
921         struct tb_port *in = tunnel->src_port;
922         int ret, rate, lanes, nrd_bw;
923         u32 cap;
924
925         /*
926          * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
927          * read parameter values so this so we can use this to determine
928          * the maximum possible bandwidth over this link.
929          *
930          * See USB4 v2 spec 1.0 10.4.4.5.
931          */
932         ret = tb_port_read(in, &cap, TB_CFG_PORT,
933                            in->cap_adap + DP_LOCAL_CAP, 1);
934         if (ret)
935                 return ret;
936
937         rate = tb_dp_cap_get_rate_ext(cap);
938         if (tb_dp_is_uhbr_rate(rate)) {
939                 /*
940                  * When UHBR is used there is no reduction in lanes so
941                  * we can use this directly.
942                  */
943                 lanes = tb_dp_cap_get_lanes(cap);
944         } else {
945                 /*
946                  * If there is no UHBR supported then check the
947                  * non-reduced rate and lanes.
948                  */
949                 ret = usb4_dp_port_nrd(in, &rate, &lanes);
950                 if (ret)
951                         return ret;
952         }
953
954         nrd_bw = tb_dp_bandwidth(rate, lanes);
955
956         if (max_bw) {
957                 ret = usb4_dp_port_granularity(in);
958                 if (ret < 0)
959                         return ret;
960                 *max_bw = roundup(nrd_bw, ret);
961         }
962
963         return nrd_bw;
964 }
965
966 static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
967                                                    int *consumed_up,
968                                                    int *consumed_down)
969 {
970         struct tb_port *out = tunnel->dst_port;
971         struct tb_port *in = tunnel->src_port;
972         int ret, allocated_bw, max_bw;
973
974         if (!usb4_dp_port_bandwidth_mode_enabled(in))
975                 return -EOPNOTSUPP;
976
977         if (!tunnel->bw_mode)
978                 return -EOPNOTSUPP;
979
980         /* Read what was allocated previously if any */
981         ret = usb4_dp_port_allocated_bandwidth(in);
982         if (ret < 0)
983                 return ret;
984         allocated_bw = ret;
985
986         ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
987         if (ret < 0)
988                 return ret;
989         if (allocated_bw == max_bw)
990                 allocated_bw = ret;
991
992         if (tb_port_path_direction_downstream(in, out)) {
993                 *consumed_up = 0;
994                 *consumed_down = allocated_bw;
995         } else {
996                 *consumed_up = allocated_bw;
997                 *consumed_down = 0;
998         }
999
1000         return 0;
1001 }
1002
1003 static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
1004                                      int *allocated_down)
1005 {
1006         struct tb_port *out = tunnel->dst_port;
1007         struct tb_port *in = tunnel->src_port;
1008
1009         /*
1010          * If we have already set the allocated bandwidth then use that.
1011          * Otherwise we read it from the DPRX.
1012          */
1013         if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
1014                 int ret, allocated_bw, max_bw;
1015
1016                 ret = usb4_dp_port_allocated_bandwidth(in);
1017                 if (ret < 0)
1018                         return ret;
1019                 allocated_bw = ret;
1020
1021                 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
1022                 if (ret < 0)
1023                         return ret;
1024                 if (allocated_bw == max_bw)
1025                         allocated_bw = ret;
1026
1027                 if (tb_port_path_direction_downstream(in, out)) {
1028                         *allocated_up = 0;
1029                         *allocated_down = allocated_bw;
1030                 } else {
1031                         *allocated_up = allocated_bw;
1032                         *allocated_down = 0;
1033                 }
1034                 return 0;
1035         }
1036
1037         return tunnel->consumed_bandwidth(tunnel, allocated_up,
1038                                           allocated_down);
1039 }
1040
1041 static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
1042                                  int *alloc_down)
1043 {
1044         struct tb_port *out = tunnel->dst_port;
1045         struct tb_port *in = tunnel->src_port;
1046         int max_bw, ret, tmp;
1047
1048         if (!usb4_dp_port_bandwidth_mode_enabled(in))
1049                 return -EOPNOTSUPP;
1050
1051         ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
1052         if (ret < 0)
1053                 return ret;
1054
1055         if (tb_port_path_direction_downstream(in, out)) {
1056                 tmp = min(*alloc_down, max_bw);
1057                 ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1058                 if (ret)
1059                         return ret;
1060                 *alloc_down = tmp;
1061                 *alloc_up = 0;
1062         } else {
1063                 tmp = min(*alloc_up, max_bw);
1064                 ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1065                 if (ret)
1066                         return ret;
1067                 *alloc_down = 0;
1068                 *alloc_up = tmp;
1069         }
1070
1071         /* Now we can use BW mode registers to figure out the bandwidth */
1072         /* TODO: need to handle discovery too */
1073         tunnel->bw_mode = true;
1074         return 0;
1075 }
1076
1077 static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
1078 {
1079         ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1080         struct tb_port *in = tunnel->src_port;
1081
1082         /*
1083          * Wait for DPRX done. Normally it should be already set for
1084          * active tunnel.
1085          */
1086         do {
1087                 u32 val;
1088                 int ret;
1089
1090                 ret = tb_port_read(in, &val, TB_CFG_PORT,
1091                                    in->cap_adap + DP_COMMON_CAP, 1);
1092                 if (ret)
1093                         return ret;
1094
1095                 if (val & DP_COMMON_CAP_DPRX_DONE) {
1096                         tb_tunnel_dbg(tunnel, "DPRX read done\n");
1097                         return 0;
1098                 }
1099                 usleep_range(100, 150);
1100         } while (ktime_before(ktime_get(), timeout));
1101
1102         tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
1103         return -ETIMEDOUT;
1104 }
1105
1106 /* Read cap from tunnel DP IN */
1107 static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
1108                           u32 *lanes)
1109 {
1110         struct tb_port *in = tunnel->src_port;
1111         u32 val;
1112         int ret;
1113
1114         switch (cap) {
1115         case DP_LOCAL_CAP:
1116         case DP_REMOTE_CAP:
1117         case DP_COMMON_CAP:
1118                 break;
1119
1120         default:
1121                 tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
1122                 return -EINVAL;
1123         }
1124
1125         /*
1126          * Read from the copied remote cap so that we take into account
1127          * if capabilities were reduced during exchange.
1128          */
1129         ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
1130         if (ret)
1131                 return ret;
1132
1133         *rate = tb_dp_cap_get_rate(val);
1134         *lanes = tb_dp_cap_get_lanes(val);
1135         return 0;
1136 }
1137
1138 static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
1139                                    int *max_down)
1140 {
1141         struct tb_port *in = tunnel->src_port;
1142         int ret;
1143
1144         if (!usb4_dp_port_bandwidth_mode_enabled(in))
1145                 return -EOPNOTSUPP;
1146
1147         ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
1148         if (ret < 0)
1149                 return ret;
1150
1151         if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
1152                 *max_up = 0;
1153                 *max_down = ret;
1154         } else {
1155                 *max_up = ret;
1156                 *max_down = 0;
1157         }
1158
1159         return 0;
1160 }
1161
1162 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1163                                     int *consumed_down)
1164 {
1165         struct tb_port *in = tunnel->src_port;
1166         const struct tb_switch *sw = in->sw;
1167         u32 rate = 0, lanes = 0;
1168         int ret;
1169
1170         if (tb_dp_is_usb4(sw)) {
1171                 /*
1172                  * On USB4 routers check if the bandwidth allocation
1173                  * mode is enabled first and then read the bandwidth
1174                  * through those registers.
1175                  */
1176                 ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
1177                                                               consumed_down);
1178                 if (ret < 0) {
1179                         if (ret != -EOPNOTSUPP)
1180                                 return ret;
1181                 } else if (!ret) {
1182                         return 0;
1183                 }
1184                 /*
1185                  * Then see if the DPRX negotiation is ready and if yes
1186                  * return that bandwidth (it may be smaller than the
1187                  * reduced one). Otherwise return the remote (possibly
1188                  * reduced) caps.
1189                  */
1190                 ret = tb_dp_wait_dprx(tunnel, 150);
1191                 if (ret) {
1192                         if (ret == -ETIMEDOUT)
1193                                 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
1194                                                      &rate, &lanes);
1195                         if (ret)
1196                                 return ret;
1197                 }
1198                 ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
1199                 if (ret)
1200                         return ret;
1201         } else if (sw->generation >= 2) {
1202                 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
1203                 if (ret)
1204                         return ret;
1205         } else {
1206                 /* No bandwidth management for legacy devices  */
1207                 *consumed_up = 0;
1208                 *consumed_down = 0;
1209                 return 0;
1210         }
1211
1212         if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
1213                 *consumed_up = 0;
1214                 *consumed_down = tb_dp_bandwidth(rate, lanes);
1215         } else {
1216                 *consumed_up = tb_dp_bandwidth(rate, lanes);
1217                 *consumed_down = 0;
1218         }
1219
1220         return 0;
1221 }
1222
1223 static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
1224 {
1225         struct tb_port *port = hop->in_port;
1226         struct tb_switch *sw = port->sw;
1227
1228         if (tb_port_use_credit_allocation(port))
1229                 hop->initial_credits = sw->min_dp_aux_credits;
1230         else
1231                 hop->initial_credits = 1;
1232 }
1233
1234 static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
1235 {
1236         struct tb_path_hop *hop;
1237
1238         path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1239         path->egress_shared_buffer = TB_PATH_NONE;
1240         path->ingress_fc_enable = TB_PATH_ALL;
1241         path->ingress_shared_buffer = TB_PATH_NONE;
1242         path->priority = TB_DP_AUX_PRIORITY;
1243         path->weight = TB_DP_AUX_WEIGHT;
1244
1245         tb_path_for_each_hop(path, hop) {
1246                 tb_dp_init_aux_credits(hop);
1247                 if (pm_support)
1248                         tb_init_pm_support(hop);
1249         }
1250 }
1251
1252 static int tb_dp_init_video_credits(struct tb_path_hop *hop)
1253 {
1254         struct tb_port *port = hop->in_port;
1255         struct tb_switch *sw = port->sw;
1256
1257         if (tb_port_use_credit_allocation(port)) {
1258                 unsigned int nfc_credits;
1259                 size_t max_dp_streams;
1260
1261                 tb_available_credits(port, &max_dp_streams);
1262                 /*
1263                  * Read the number of currently allocated NFC credits
1264                  * from the lane adapter. Since we only use them for DP
1265                  * tunneling we can use that to figure out how many DP
1266                  * tunnels already go through the lane adapter.
1267                  */
1268                 nfc_credits = port->config.nfc_credits &
1269                                 ADP_CS_4_NFC_BUFFERS_MASK;
1270                 if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
1271                         return -ENOSPC;
1272
1273                 hop->nfc_credits = sw->min_dp_main_credits;
1274         } else {
1275                 hop->nfc_credits = min(port->total_credits - 2, 12U);
1276         }
1277
1278         return 0;
1279 }
1280
1281 static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
1282 {
1283         struct tb_path_hop *hop;
1284
1285         path->egress_fc_enable = TB_PATH_NONE;
1286         path->egress_shared_buffer = TB_PATH_NONE;
1287         path->ingress_fc_enable = TB_PATH_NONE;
1288         path->ingress_shared_buffer = TB_PATH_NONE;
1289         path->priority = TB_DP_VIDEO_PRIORITY;
1290         path->weight = TB_DP_VIDEO_WEIGHT;
1291
1292         tb_path_for_each_hop(path, hop) {
1293                 int ret;
1294
1295                 ret = tb_dp_init_video_credits(hop);
1296                 if (ret)
1297                         return ret;
1298                 if (pm_support)
1299                         tb_init_pm_support(hop);
1300         }
1301
1302         return 0;
1303 }
1304
1305 static void tb_dp_dump(struct tb_tunnel *tunnel)
1306 {
1307         struct tb_port *in, *out;
1308         u32 dp_cap, rate, lanes;
1309
1310         in = tunnel->src_port;
1311         out = tunnel->dst_port;
1312
1313         if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1314                          in->cap_adap + DP_LOCAL_CAP, 1))
1315                 return;
1316
1317         rate = tb_dp_cap_get_rate(dp_cap);
1318         lanes = tb_dp_cap_get_lanes(dp_cap);
1319
1320         tb_tunnel_dbg(tunnel,
1321                       "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1322                       rate, lanes, tb_dp_bandwidth(rate, lanes));
1323
1324         if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
1325                          out->cap_adap + DP_LOCAL_CAP, 1))
1326                 return;
1327
1328         rate = tb_dp_cap_get_rate(dp_cap);
1329         lanes = tb_dp_cap_get_lanes(dp_cap);
1330
1331         tb_tunnel_dbg(tunnel,
1332                       "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1333                       rate, lanes, tb_dp_bandwidth(rate, lanes));
1334
1335         if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1336                          in->cap_adap + DP_REMOTE_CAP, 1))
1337                 return;
1338
1339         rate = tb_dp_cap_get_rate(dp_cap);
1340         lanes = tb_dp_cap_get_lanes(dp_cap);
1341
1342         tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
1343                       rate, lanes, tb_dp_bandwidth(rate, lanes));
1344 }
1345
1346 /**
1347  * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
1348  * @tb: Pointer to the domain structure
1349  * @in: DP in adapter
1350  * @alloc_hopid: Allocate HopIDs from visited ports
1351  *
1352  * If @in adapter is active, follows the tunnel to the DP out adapter
1353  * and back. Returns the discovered tunnel or %NULL if there was no
1354  * tunnel.
1355  *
1356  * Return: DP tunnel or %NULL if no tunnel found.
1357  */
1358 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
1359                                         bool alloc_hopid)
1360 {
1361         struct tb_tunnel *tunnel;
1362         struct tb_port *port;
1363         struct tb_path *path;
1364
1365         if (!tb_dp_port_is_enabled(in))
1366                 return NULL;
1367
1368         tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1369         if (!tunnel)
1370                 return NULL;
1371
1372         tunnel->init = tb_dp_init;
1373         tunnel->deinit = tb_dp_deinit;
1374         tunnel->activate = tb_dp_activate;
1375         tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1376         tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1377         tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1378         tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1379         tunnel->src_port = in;
1380
1381         path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
1382                                 &tunnel->dst_port, "Video", alloc_hopid);
1383         if (!path) {
1384                 /* Just disable the DP IN port */
1385                 tb_dp_port_enable(in, false);
1386                 goto err_free;
1387         }
1388         tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
1389         if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
1390                 goto err_free;
1391
1392         path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
1393                                 alloc_hopid);
1394         if (!path)
1395                 goto err_deactivate;
1396         tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
1397         tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
1398
1399         path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
1400                                 &port, "AUX RX", alloc_hopid);
1401         if (!path)
1402                 goto err_deactivate;
1403         tunnel->paths[TB_DP_AUX_PATH_IN] = path;
1404         tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
1405
1406         /* Validate that the tunnel is complete */
1407         if (!tb_port_is_dpout(tunnel->dst_port)) {
1408                 tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
1409                 goto err_deactivate;
1410         }
1411
1412         if (!tb_dp_port_is_enabled(tunnel->dst_port))
1413                 goto err_deactivate;
1414
1415         if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
1416                 goto err_deactivate;
1417
1418         if (port != tunnel->src_port) {
1419                 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1420                 goto err_deactivate;
1421         }
1422
1423         tb_dp_dump(tunnel);
1424
1425         tb_tunnel_dbg(tunnel, "discovered\n");
1426         return tunnel;
1427
1428 err_deactivate:
1429         tb_tunnel_deactivate(tunnel);
1430 err_free:
1431         tb_tunnel_free(tunnel);
1432
1433         return NULL;
1434 }
1435
1436 /**
1437  * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
1438  * @tb: Pointer to the domain structure
1439  * @in: DP in adapter port
1440  * @out: DP out adapter port
1441  * @link_nr: Preferred lane adapter when the link is not bonded
1442  * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
1443  *          if not limited)
1444  * @max_down: Maximum available downstream bandwidth for the DP tunnel
1445  *            (%0 if not limited)
1446  *
1447  * Allocates a tunnel between @in and @out that is capable of tunneling
1448  * Display Port traffic.
1449  *
1450  * Return: Returns a tb_tunnel on success or NULL on failure.
1451  */
1452 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
1453                                      struct tb_port *out, int link_nr,
1454                                      int max_up, int max_down)
1455 {
1456         struct tb_tunnel *tunnel;
1457         struct tb_path **paths;
1458         struct tb_path *path;
1459         bool pm_support;
1460
1461         if (WARN_ON(!in->cap_adap || !out->cap_adap))
1462                 return NULL;
1463
1464         tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1465         if (!tunnel)
1466                 return NULL;
1467
1468         tunnel->init = tb_dp_init;
1469         tunnel->deinit = tb_dp_deinit;
1470         tunnel->activate = tb_dp_activate;
1471         tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1472         tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1473         tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1474         tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1475         tunnel->src_port = in;
1476         tunnel->dst_port = out;
1477         tunnel->max_up = max_up;
1478         tunnel->max_down = max_down;
1479
1480         paths = tunnel->paths;
1481         pm_support = usb4_switch_version(in->sw) >= 2;
1482
1483         path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
1484                              link_nr, "Video");
1485         if (!path)
1486                 goto err_free;
1487         tb_dp_init_video_path(path, pm_support);
1488         paths[TB_DP_VIDEO_PATH_OUT] = path;
1489
1490         path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
1491                              TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
1492         if (!path)
1493                 goto err_free;
1494         tb_dp_init_aux_path(path, pm_support);
1495         paths[TB_DP_AUX_PATH_OUT] = path;
1496
1497         path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
1498                              TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
1499         if (!path)
1500                 goto err_free;
1501         tb_dp_init_aux_path(path, pm_support);
1502         paths[TB_DP_AUX_PATH_IN] = path;
1503
1504         return tunnel;
1505
1506 err_free:
1507         tb_tunnel_free(tunnel);
1508         return NULL;
1509 }
1510
1511 static unsigned int tb_dma_available_credits(const struct tb_port *port)
1512 {
1513         const struct tb_switch *sw = port->sw;
1514         int credits;
1515
1516         credits = tb_available_credits(port, NULL);
1517         if (tb_acpi_may_tunnel_pcie())
1518                 credits -= sw->max_pcie_credits;
1519         credits -= port->dma_credits;
1520
1521         return credits > 0 ? credits : 0;
1522 }
1523
1524 static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
1525 {
1526         struct tb_port *port = hop->in_port;
1527
1528         if (tb_port_use_credit_allocation(port)) {
1529                 unsigned int available = tb_dma_available_credits(port);
1530
1531                 /*
1532                  * Need to have at least TB_MIN_DMA_CREDITS, otherwise
1533                  * DMA path cannot be established.
1534                  */
1535                 if (available < TB_MIN_DMA_CREDITS)
1536                         return -ENOSPC;
1537
1538                 while (credits > available)
1539                         credits--;
1540
1541                 tb_port_dbg(port, "reserving %u credits for DMA path\n",
1542                             credits);
1543
1544                 port->dma_credits += credits;
1545         } else {
1546                 if (tb_port_is_null(port))
1547                         credits = port->bonded ? 14 : 6;
1548                 else
1549                         credits = min(port->total_credits, credits);
1550         }
1551
1552         hop->initial_credits = credits;
1553         return 0;
1554 }
1555
1556 /* Path from lane adapter to NHI */
1557 static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
1558 {
1559         struct tb_path_hop *hop;
1560         unsigned int i, tmp;
1561
1562         path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1563         path->ingress_fc_enable = TB_PATH_ALL;
1564         path->egress_shared_buffer = TB_PATH_NONE;
1565         path->ingress_shared_buffer = TB_PATH_NONE;
1566         path->priority = TB_DMA_PRIORITY;
1567         path->weight = TB_DMA_WEIGHT;
1568         path->clear_fc = true;
1569
1570         /*
1571          * First lane adapter is the one connected to the remote host.
1572          * We don't tunnel other traffic over this link so can use all
1573          * the credits (except the ones reserved for control traffic).
1574          */
1575         hop = &path->hops[0];
1576         tmp = min(tb_usable_credits(hop->in_port), credits);
1577         hop->initial_credits = tmp;
1578         hop->in_port->dma_credits += tmp;
1579
1580         for (i = 1; i < path->path_length; i++) {
1581                 int ret;
1582
1583                 ret = tb_dma_reserve_credits(&path->hops[i], credits);
1584                 if (ret)
1585                         return ret;
1586         }
1587
1588         return 0;
1589 }
1590
1591 /* Path from NHI to lane adapter */
1592 static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
1593 {
1594         struct tb_path_hop *hop;
1595
1596         path->egress_fc_enable = TB_PATH_ALL;
1597         path->ingress_fc_enable = TB_PATH_ALL;
1598         path->egress_shared_buffer = TB_PATH_NONE;
1599         path->ingress_shared_buffer = TB_PATH_NONE;
1600         path->priority = TB_DMA_PRIORITY;
1601         path->weight = TB_DMA_WEIGHT;
1602         path->clear_fc = true;
1603
1604         tb_path_for_each_hop(path, hop) {
1605                 int ret;
1606
1607                 ret = tb_dma_reserve_credits(hop, credits);
1608                 if (ret)
1609                         return ret;
1610         }
1611
1612         return 0;
1613 }
1614
1615 static void tb_dma_release_credits(struct tb_path_hop *hop)
1616 {
1617         struct tb_port *port = hop->in_port;
1618
1619         if (tb_port_use_credit_allocation(port)) {
1620                 port->dma_credits -= hop->initial_credits;
1621
1622                 tb_port_dbg(port, "released %u DMA path credits\n",
1623                             hop->initial_credits);
1624         }
1625 }
1626
1627 static void tb_dma_deinit_path(struct tb_path *path)
1628 {
1629         struct tb_path_hop *hop;
1630
1631         tb_path_for_each_hop(path, hop)
1632                 tb_dma_release_credits(hop);
1633 }
1634
1635 static void tb_dma_deinit(struct tb_tunnel *tunnel)
1636 {
1637         int i;
1638
1639         for (i = 0; i < tunnel->npaths; i++) {
1640                 if (!tunnel->paths[i])
1641                         continue;
1642                 tb_dma_deinit_path(tunnel->paths[i]);
1643         }
1644 }
1645
1646 /**
1647  * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1648  * @tb: Pointer to the domain structure
1649  * @nhi: Host controller port
1650  * @dst: Destination null port which the other domain is connected to
1651  * @transmit_path: HopID used for transmitting packets
1652  * @transmit_ring: NHI ring number used to send packets towards the
1653  *                 other domain. Set to %-1 if TX path is not needed.
1654  * @receive_path: HopID used for receiving packets
1655  * @receive_ring: NHI ring number used to receive packets from the
1656  *                other domain. Set to %-1 if RX path is not needed.
1657  *
1658  * Return: Returns a tb_tunnel on success or NULL on failure.
1659  */
1660 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1661                                       struct tb_port *dst, int transmit_path,
1662                                       int transmit_ring, int receive_path,
1663                                       int receive_ring)
1664 {
1665         struct tb_tunnel *tunnel;
1666         size_t npaths = 0, i = 0;
1667         struct tb_path *path;
1668         int credits;
1669
1670         /* Ring 0 is reserved for control channel */
1671         if (WARN_ON(!receive_ring || !transmit_ring))
1672                 return NULL;
1673
1674         if (receive_ring > 0)
1675                 npaths++;
1676         if (transmit_ring > 0)
1677                 npaths++;
1678
1679         if (WARN_ON(!npaths))
1680                 return NULL;
1681
1682         tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
1683         if (!tunnel)
1684                 return NULL;
1685
1686         tunnel->src_port = nhi;
1687         tunnel->dst_port = dst;
1688         tunnel->deinit = tb_dma_deinit;
1689
1690         credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
1691
1692         if (receive_ring > 0) {
1693                 path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
1694                                      "DMA RX");
1695                 if (!path)
1696                         goto err_free;
1697                 tunnel->paths[i++] = path;
1698                 if (tb_dma_init_rx_path(path, credits)) {
1699                         tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
1700                         goto err_free;
1701                 }
1702         }
1703
1704         if (transmit_ring > 0) {
1705                 path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
1706                                      "DMA TX");
1707                 if (!path)
1708                         goto err_free;
1709                 tunnel->paths[i++] = path;
1710                 if (tb_dma_init_tx_path(path, credits)) {
1711                         tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
1712                         goto err_free;
1713                 }
1714         }
1715
1716         return tunnel;
1717
1718 err_free:
1719         tb_tunnel_free(tunnel);
1720         return NULL;
1721 }
1722
1723 /**
1724  * tb_tunnel_match_dma() - Match DMA tunnel
1725  * @tunnel: Tunnel to match
1726  * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1727  * @transmit_ring: NHI ring number used to send packets towards the
1728  *                 other domain. Pass %-1 to ignore.
1729  * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1730  * @receive_ring: NHI ring number used to receive packets from the
1731  *                other domain. Pass %-1 to ignore.
1732  *
1733  * This function can be used to match specific DMA tunnel, if there are
1734  * multiple DMA tunnels going through the same XDomain connection.
1735  * Returns true if there is match and false otherwise.
1736  */
1737 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1738                          int transmit_ring, int receive_path, int receive_ring)
1739 {
1740         const struct tb_path *tx_path = NULL, *rx_path = NULL;
1741         int i;
1742
1743         if (!receive_ring || !transmit_ring)
1744                 return false;
1745
1746         for (i = 0; i < tunnel->npaths; i++) {
1747                 const struct tb_path *path = tunnel->paths[i];
1748
1749                 if (!path)
1750                         continue;
1751
1752                 if (tb_port_is_nhi(path->hops[0].in_port))
1753                         tx_path = path;
1754                 else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1755                         rx_path = path;
1756         }
1757
1758         if (transmit_ring > 0 || transmit_path > 0) {
1759                 if (!tx_path)
1760                         return false;
1761                 if (transmit_ring > 0 &&
1762                     (tx_path->hops[0].in_hop_index != transmit_ring))
1763                         return false;
1764                 if (transmit_path > 0 &&
1765                     (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1766                         return false;
1767         }
1768
1769         if (receive_ring > 0 || receive_path > 0) {
1770                 if (!rx_path)
1771                         return false;
1772                 if (receive_path > 0 &&
1773                     (rx_path->hops[0].in_hop_index != receive_path))
1774                         return false;
1775                 if (receive_ring > 0 &&
1776                     (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1777                         return false;
1778         }
1779
1780         return true;
1781 }
1782
1783 static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
1784 {
1785         int ret, up_max_rate, down_max_rate;
1786
1787         ret = usb4_usb3_port_max_link_rate(up);
1788         if (ret < 0)
1789                 return ret;
1790         up_max_rate = ret;
1791
1792         ret = usb4_usb3_port_max_link_rate(down);
1793         if (ret < 0)
1794                 return ret;
1795         down_max_rate = ret;
1796
1797         return min(up_max_rate, down_max_rate);
1798 }
1799
1800 static int tb_usb3_init(struct tb_tunnel *tunnel)
1801 {
1802         tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
1803                       tunnel->allocated_up, tunnel->allocated_down);
1804
1805         return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
1806                                                  &tunnel->allocated_up,
1807                                                  &tunnel->allocated_down);
1808 }
1809
1810 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
1811 {
1812         int res;
1813
1814         res = tb_usb3_port_enable(tunnel->src_port, activate);
1815         if (res)
1816                 return res;
1817
1818         if (tb_port_is_usb3_up(tunnel->dst_port))
1819                 return tb_usb3_port_enable(tunnel->dst_port, activate);
1820
1821         return 0;
1822 }
1823
1824 static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
1825                 int *consumed_up, int *consumed_down)
1826 {
1827         struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
1828         int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
1829
1830         /*
1831          * PCIe tunneling, if enabled, affects the USB3 bandwidth so
1832          * take that it into account here.
1833          */
1834         *consumed_up = tunnel->allocated_up *
1835                 (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1836         *consumed_down = tunnel->allocated_down *
1837                 (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1838
1839         if (tb_port_get_link_generation(port) >= 4) {
1840                 *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
1841                 *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
1842         }
1843
1844         return 0;
1845 }
1846
1847 static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
1848 {
1849         int ret;
1850
1851         ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
1852                                                &tunnel->allocated_up,
1853                                                &tunnel->allocated_down);
1854         if (ret)
1855                 return ret;
1856
1857         tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
1858                       tunnel->allocated_up, tunnel->allocated_down);
1859         return 0;
1860 }
1861
1862 static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1863                                                 int *available_up,
1864                                                 int *available_down)
1865 {
1866         int ret, max_rate, allocate_up, allocate_down;
1867
1868         ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
1869         if (ret < 0) {
1870                 tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
1871                 return;
1872         }
1873
1874         /*
1875          * 90% of the max rate can be allocated for isochronous
1876          * transfers.
1877          */
1878         max_rate = ret * 90 / 100;
1879
1880         /* No need to reclaim if already at maximum */
1881         if (tunnel->allocated_up >= max_rate &&
1882             tunnel->allocated_down >= max_rate)
1883                 return;
1884
1885         /* Don't go lower than what is already allocated */
1886         allocate_up = min(max_rate, *available_up);
1887         if (allocate_up < tunnel->allocated_up)
1888                 allocate_up = tunnel->allocated_up;
1889
1890         allocate_down = min(max_rate, *available_down);
1891         if (allocate_down < tunnel->allocated_down)
1892                 allocate_down = tunnel->allocated_down;
1893
1894         /* If no changes no need to do more */
1895         if (allocate_up == tunnel->allocated_up &&
1896             allocate_down == tunnel->allocated_down)
1897                 return;
1898
1899         ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
1900                                                 &allocate_down);
1901         if (ret) {
1902                 tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
1903                 return;
1904         }
1905
1906         tunnel->allocated_up = allocate_up;
1907         *available_up -= tunnel->allocated_up;
1908
1909         tunnel->allocated_down = allocate_down;
1910         *available_down -= tunnel->allocated_down;
1911
1912         tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
1913                       tunnel->allocated_up, tunnel->allocated_down);
1914 }
1915
1916 static void tb_usb3_init_credits(struct tb_path_hop *hop)
1917 {
1918         struct tb_port *port = hop->in_port;
1919         struct tb_switch *sw = port->sw;
1920         unsigned int credits;
1921
1922         if (tb_port_use_credit_allocation(port)) {
1923                 credits = sw->max_usb3_credits;
1924         } else {
1925                 if (tb_port_is_null(port))
1926                         credits = port->bonded ? 32 : 16;
1927                 else
1928                         credits = 7;
1929         }
1930
1931         hop->initial_credits = credits;
1932 }
1933
1934 static void tb_usb3_init_path(struct tb_path *path)
1935 {
1936         struct tb_path_hop *hop;
1937
1938         path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1939         path->egress_shared_buffer = TB_PATH_NONE;
1940         path->ingress_fc_enable = TB_PATH_ALL;
1941         path->ingress_shared_buffer = TB_PATH_NONE;
1942         path->priority = TB_USB3_PRIORITY;
1943         path->weight = TB_USB3_WEIGHT;
1944         path->drop_packages = 0;
1945
1946         tb_path_for_each_hop(path, hop)
1947                 tb_usb3_init_credits(hop);
1948 }
1949
1950 /**
1951  * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1952  * @tb: Pointer to the domain structure
1953  * @down: USB3 downstream adapter
1954  * @alloc_hopid: Allocate HopIDs from visited ports
1955  *
1956  * If @down adapter is active, follows the tunnel to the USB3 upstream
1957  * adapter and back. Returns the discovered tunnel or %NULL if there was
1958  * no tunnel.
1959  */
1960 struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
1961                                           bool alloc_hopid)
1962 {
1963         struct tb_tunnel *tunnel;
1964         struct tb_path *path;
1965
1966         if (!tb_usb3_port_is_enabled(down))
1967                 return NULL;
1968
1969         tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1970         if (!tunnel)
1971                 return NULL;
1972
1973         tunnel->activate = tb_usb3_activate;
1974         tunnel->src_port = down;
1975
1976         /*
1977          * Discover both paths even if they are not complete. We will
1978          * clean them up by calling tb_tunnel_deactivate() below in that
1979          * case.
1980          */
1981         path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
1982                                 &tunnel->dst_port, "USB3 Down", alloc_hopid);
1983         if (!path) {
1984                 /* Just disable the downstream port */
1985                 tb_usb3_port_enable(down, false);
1986                 goto err_free;
1987         }
1988         tunnel->paths[TB_USB3_PATH_DOWN] = path;
1989         tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
1990
1991         path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
1992                                 "USB3 Up", alloc_hopid);
1993         if (!path)
1994                 goto err_deactivate;
1995         tunnel->paths[TB_USB3_PATH_UP] = path;
1996         tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
1997
1998         /* Validate that the tunnel is complete */
1999         if (!tb_port_is_usb3_up(tunnel->dst_port)) {
2000                 tb_port_warn(tunnel->dst_port,
2001                              "path does not end on an USB3 adapter, cleaning up\n");
2002                 goto err_deactivate;
2003         }
2004
2005         if (down != tunnel->src_port) {
2006                 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
2007                 goto err_deactivate;
2008         }
2009
2010         if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
2011                 tb_tunnel_warn(tunnel,
2012                                "tunnel is not fully activated, cleaning up\n");
2013                 goto err_deactivate;
2014         }
2015
2016         if (!tb_route(down->sw)) {
2017                 int ret;
2018
2019                 /*
2020                  * Read the initial bandwidth allocation for the first
2021                  * hop tunnel.
2022                  */
2023                 ret = usb4_usb3_port_allocated_bandwidth(down,
2024                         &tunnel->allocated_up, &tunnel->allocated_down);
2025                 if (ret)
2026                         goto err_deactivate;
2027
2028                 tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
2029                               tunnel->allocated_up, tunnel->allocated_down);
2030
2031                 tunnel->init = tb_usb3_init;
2032                 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2033                 tunnel->release_unused_bandwidth =
2034                         tb_usb3_release_unused_bandwidth;
2035                 tunnel->reclaim_available_bandwidth =
2036                         tb_usb3_reclaim_available_bandwidth;
2037         }
2038
2039         tb_tunnel_dbg(tunnel, "discovered\n");
2040         return tunnel;
2041
2042 err_deactivate:
2043         tb_tunnel_deactivate(tunnel);
2044 err_free:
2045         tb_tunnel_free(tunnel);
2046
2047         return NULL;
2048 }
2049
2050 /**
2051  * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2052  * @tb: Pointer to the domain structure
2053  * @up: USB3 upstream adapter port
2054  * @down: USB3 downstream adapter port
2055  * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
2056  *          if not limited).
2057  * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
2058  *            (%0 if not limited).
2059  *
2060  * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
2061  * @TB_TYPE_USB3_DOWN.
2062  *
2063  * Return: Returns a tb_tunnel on success or %NULL on failure.
2064  */
2065 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
2066                                        struct tb_port *down, int max_up,
2067                                        int max_down)
2068 {
2069         struct tb_tunnel *tunnel;
2070         struct tb_path *path;
2071         int max_rate = 0;
2072
2073         /*
2074          * Check that we have enough bandwidth available for the new
2075          * USB3 tunnel.
2076          */
2077         if (max_up > 0 || max_down > 0) {
2078                 max_rate = tb_usb3_max_link_rate(down, up);
2079                 if (max_rate < 0)
2080                         return NULL;
2081
2082                 /* Only 90% can be allocated for USB3 isochronous transfers */
2083                 max_rate = max_rate * 90 / 100;
2084                 tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
2085                             max_rate);
2086
2087                 if (max_rate > max_up || max_rate > max_down) {
2088                         tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
2089                         return NULL;
2090                 }
2091         }
2092
2093         tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2094         if (!tunnel)
2095                 return NULL;
2096
2097         tunnel->activate = tb_usb3_activate;
2098         tunnel->src_port = down;
2099         tunnel->dst_port = up;
2100         tunnel->max_up = max_up;
2101         tunnel->max_down = max_down;
2102
2103         path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
2104                              "USB3 Down");
2105         if (!path) {
2106                 tb_tunnel_free(tunnel);
2107                 return NULL;
2108         }
2109         tb_usb3_init_path(path);
2110         tunnel->paths[TB_USB3_PATH_DOWN] = path;
2111
2112         path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
2113                              "USB3 Up");
2114         if (!path) {
2115                 tb_tunnel_free(tunnel);
2116                 return NULL;
2117         }
2118         tb_usb3_init_path(path);
2119         tunnel->paths[TB_USB3_PATH_UP] = path;
2120
2121         if (!tb_route(down->sw)) {
2122                 tunnel->allocated_up = max_rate;
2123                 tunnel->allocated_down = max_rate;
2124
2125                 tunnel->init = tb_usb3_init;
2126                 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2127                 tunnel->release_unused_bandwidth =
2128                         tb_usb3_release_unused_bandwidth;
2129                 tunnel->reclaim_available_bandwidth =
2130                         tb_usb3_reclaim_available_bandwidth;
2131         }
2132
2133         return tunnel;
2134 }
2135
2136 /**
2137  * tb_tunnel_free() - free a tunnel
2138  * @tunnel: Tunnel to be freed
2139  *
2140  * Frees a tunnel. The tunnel does not need to be deactivated.
2141  */
2142 void tb_tunnel_free(struct tb_tunnel *tunnel)
2143 {
2144         int i;
2145
2146         if (!tunnel)
2147                 return;
2148
2149         if (tunnel->deinit)
2150                 tunnel->deinit(tunnel);
2151
2152         for (i = 0; i < tunnel->npaths; i++) {
2153                 if (tunnel->paths[i])
2154                         tb_path_free(tunnel->paths[i]);
2155         }
2156
2157         kfree(tunnel->paths);
2158         kfree(tunnel);
2159 }
2160
2161 /**
2162  * tb_tunnel_is_invalid - check whether an activated path is still valid
2163  * @tunnel: Tunnel to check
2164  */
2165 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
2166 {
2167         int i;
2168
2169         for (i = 0; i < tunnel->npaths; i++) {
2170                 WARN_ON(!tunnel->paths[i]->activated);
2171                 if (tb_path_is_invalid(tunnel->paths[i]))
2172                         return true;
2173         }
2174
2175         return false;
2176 }
2177
2178 /**
2179  * tb_tunnel_restart() - activate a tunnel after a hardware reset
2180  * @tunnel: Tunnel to restart
2181  *
2182  * Return: 0 on success and negative errno in case if failure
2183  */
2184 int tb_tunnel_restart(struct tb_tunnel *tunnel)
2185 {
2186         int res, i;
2187
2188         tb_tunnel_dbg(tunnel, "activating\n");
2189
2190         /*
2191          * Make sure all paths are properly disabled before enabling
2192          * them again.
2193          */
2194         for (i = 0; i < tunnel->npaths; i++) {
2195                 if (tunnel->paths[i]->activated) {
2196                         tb_path_deactivate(tunnel->paths[i]);
2197                         tunnel->paths[i]->activated = false;
2198                 }
2199         }
2200
2201         if (tunnel->init) {
2202                 res = tunnel->init(tunnel);
2203                 if (res)
2204                         return res;
2205         }
2206
2207         for (i = 0; i < tunnel->npaths; i++) {
2208                 res = tb_path_activate(tunnel->paths[i]);
2209                 if (res)
2210                         goto err;
2211         }
2212
2213         if (tunnel->activate) {
2214                 res = tunnel->activate(tunnel, true);
2215                 if (res)
2216                         goto err;
2217         }
2218
2219         return 0;
2220
2221 err:
2222         tb_tunnel_warn(tunnel, "activation failed\n");
2223         tb_tunnel_deactivate(tunnel);
2224         return res;
2225 }
2226
2227 /**
2228  * tb_tunnel_activate() - activate a tunnel
2229  * @tunnel: Tunnel to activate
2230  *
2231  * Return: Returns 0 on success or an error code on failure.
2232  */
2233 int tb_tunnel_activate(struct tb_tunnel *tunnel)
2234 {
2235         int i;
2236
2237         for (i = 0; i < tunnel->npaths; i++) {
2238                 if (tunnel->paths[i]->activated) {
2239                         tb_tunnel_WARN(tunnel,
2240                                        "trying to activate an already activated tunnel\n");
2241                         return -EINVAL;
2242                 }
2243         }
2244
2245         return tb_tunnel_restart(tunnel);
2246 }
2247
2248 /**
2249  * tb_tunnel_deactivate() - deactivate a tunnel
2250  * @tunnel: Tunnel to deactivate
2251  */
2252 void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
2253 {
2254         int i;
2255
2256         tb_tunnel_dbg(tunnel, "deactivating\n");
2257
2258         if (tunnel->activate)
2259                 tunnel->activate(tunnel, false);
2260
2261         for (i = 0; i < tunnel->npaths; i++) {
2262                 if (tunnel->paths[i] && tunnel->paths[i]->activated)
2263                         tb_path_deactivate(tunnel->paths[i]);
2264         }
2265 }
2266
2267 /**
2268  * tb_tunnel_port_on_path() - Does the tunnel go through port
2269  * @tunnel: Tunnel to check
2270  * @port: Port to check
2271  *
2272  * Returns true if @tunnel goes through @port (direction does not matter),
2273  * false otherwise.
2274  */
2275 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
2276                             const struct tb_port *port)
2277 {
2278         int i;
2279
2280         for (i = 0; i < tunnel->npaths; i++) {
2281                 if (!tunnel->paths[i])
2282                         continue;
2283
2284                 if (tb_path_port_on_path(tunnel->paths[i], port))
2285                         return true;
2286         }
2287
2288         return false;
2289 }
2290
2291 static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
2292 {
2293         int i;
2294
2295         for (i = 0; i < tunnel->npaths; i++) {
2296                 if (!tunnel->paths[i])
2297                         return false;
2298                 if (!tunnel->paths[i]->activated)
2299                         return false;
2300         }
2301
2302         return true;
2303 }
2304
2305 /**
2306  * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
2307  * @tunnel: Tunnel to check
2308  * @max_up: Maximum upstream bandwidth in Mb/s
2309  * @max_down: Maximum downstream bandwidth in Mb/s
2310  *
2311  * Returns maximum possible bandwidth this tunnel can go if not limited
2312  * by other bandwidth clients. If the tunnel does not support this
2313  * returns %-EOPNOTSUPP.
2314  */
2315 int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
2316                                 int *max_down)
2317 {
2318         if (!tb_tunnel_is_active(tunnel))
2319                 return -EINVAL;
2320
2321         if (tunnel->maximum_bandwidth)
2322                 return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
2323         return -EOPNOTSUPP;
2324 }
2325
2326 /**
2327  * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
2328  * @tunnel: Tunnel to check
2329  * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
2330  * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
2331  *                  stored here
2332  *
2333  * Returns the bandwidth allocated for the tunnel. This may be higher
2334  * than what the tunnel actually consumes.
2335  */
2336 int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
2337                                   int *allocated_down)
2338 {
2339         if (!tb_tunnel_is_active(tunnel))
2340                 return -EINVAL;
2341
2342         if (tunnel->allocated_bandwidth)
2343                 return tunnel->allocated_bandwidth(tunnel, allocated_up,
2344                                                    allocated_down);
2345         return -EOPNOTSUPP;
2346 }
2347
2348 /**
2349  * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
2350  * @tunnel: Tunnel whose bandwidth allocation to change
2351  * @alloc_up: New upstream bandwidth in Mb/s
2352  * @alloc_down: New downstream bandwidth in Mb/s
2353  *
2354  * Tries to change tunnel bandwidth allocation. If succeeds returns %0
2355  * and updates @alloc_up and @alloc_down to that was actually allocated
2356  * (it may not be the same as passed originally). Returns negative errno
2357  * in case of failure.
2358  */
2359 int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
2360                               int *alloc_down)
2361 {
2362         if (!tb_tunnel_is_active(tunnel))
2363                 return -EINVAL;
2364
2365         if (tunnel->alloc_bandwidth)
2366                 return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
2367
2368         return -EOPNOTSUPP;
2369 }
2370
2371 /**
2372  * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2373  * @tunnel: Tunnel to check
2374  * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
2375  *               Can be %NULL.
2376  * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
2377  *                 Can be %NULL.
2378  *
2379  * Stores the amount of isochronous bandwidth @tunnel consumes in
2380  * @consumed_up and @consumed_down. In case of success returns %0,
2381  * negative errno otherwise.
2382  */
2383 int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
2384                                  int *consumed_down)
2385 {
2386         int up_bw = 0, down_bw = 0;
2387
2388         if (!tb_tunnel_is_active(tunnel))
2389                 goto out;
2390
2391         if (tunnel->consumed_bandwidth) {
2392                 int ret;
2393
2394                 ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
2395                 if (ret)
2396                         return ret;
2397
2398                 tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
2399                               down_bw);
2400         }
2401
2402 out:
2403         if (consumed_up)
2404                 *consumed_up = up_bw;
2405         if (consumed_down)
2406                 *consumed_down = down_bw;
2407
2408         return 0;
2409 }
2410
2411 /**
2412  * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
2413  * @tunnel: Tunnel whose unused bandwidth to release
2414  *
2415  * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
2416  * moment) this function makes it to release all the unused bandwidth.
2417  *
2418  * Returns %0 in case of success and negative errno otherwise.
2419  */
2420 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
2421 {
2422         if (!tb_tunnel_is_active(tunnel))
2423                 return 0;
2424
2425         if (tunnel->release_unused_bandwidth) {
2426                 int ret;
2427
2428                 ret = tunnel->release_unused_bandwidth(tunnel);
2429                 if (ret)
2430                         return ret;
2431         }
2432
2433         return 0;
2434 }
2435
2436 /**
2437  * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
2438  * @tunnel: Tunnel reclaiming available bandwidth
2439  * @available_up: Available upstream bandwidth (in Mb/s)
2440  * @available_down: Available downstream bandwidth (in Mb/s)
2441  *
2442  * Reclaims bandwidth from @available_up and @available_down and updates
2443  * the variables accordingly (e.g decreases both according to what was
2444  * reclaimed by the tunnel). If nothing was reclaimed the values are
2445  * kept as is.
2446  */
2447 void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
2448                                            int *available_up,
2449                                            int *available_down)
2450 {
2451         if (!tb_tunnel_is_active(tunnel))
2452                 return;
2453
2454         if (tunnel->reclaim_available_bandwidth)
2455                 tunnel->reclaim_available_bandwidth(tunnel, available_up,
2456                                                     available_down);
2457 }
2458
2459 const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
2460 {
2461         return tb_tunnel_names[tunnel->type];
2462 }