// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ktime.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS			14U
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1U

static bool bw_alloc_mode = true;
module_param(bw_alloc_mode, bool, 0444);
MODULE_PARM_DESC(bw_alloc_mode,
		 "enable bandwidth allocation mode if supported (default: true)");

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],                  \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

76 | static inline unsigned int tb_usable_credits(const struct tb_port *port) |
77 | { | |
78 | return port->total_credits - port->ctl_credits; | |
79 | } | |
80 | ||
81 | /** | |
82 | * tb_available_credits() - Available credits for PCIe and DMA | |
83 | * @port: Lane adapter to check | |
84 | * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP | |
85 | * streams possible through this lane adapter | |
86 | */ | |
87 | static unsigned int tb_available_credits(const struct tb_port *port, | |
88 | size_t *max_dp_streams) | |
89 | { | |
90 | const struct tb_switch *sw = port->sw; | |
91 | int credits, usb3, pcie, spare; | |
92 | size_t ndp; | |
93 | ||
94 | usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0; | |
95 | pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0; | |
96 | ||
97 | if (tb_acpi_is_xdomain_allowed()) { | |
98 | spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS); | |
99 | /* Add some credits for potential second DMA tunnel */ | |
100 | spare += TB_MIN_DMA_CREDITS; | |
101 | } else { | |
102 | spare = 0; | |
103 | } | |
104 | ||
105 | credits = tb_usable_credits(port); | |
106 | if (tb_acpi_may_tunnel_dp()) { | |
107 | /* | |
108 | * Maximum number of DP streams possible through the | |
109 | * lane adapter. | |
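		 * Each stream needs both AUX and main link buffers,
		 * so divide what remains after the USB3, PCIe and DMA
		 * reservations by the per-stream cost.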
		 */
		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
			ndp = (credits - (usb3 + pcie + spare)) /
			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
		else
			ndp = 0;
	} else {
		ndp = 0;
	}
	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	credits -= usb3;

	if (max_dp_streams)
		*max_dp_streams = ndp;

	return credits > 0 ? credits : 0;
}

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_pci_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available;

		available = tb_available_credits(port, NULL);
		credits = min(sw->max_pcie_credits, available);

		if (credits < TB_MIN_PCIE_CREDITS)
			return -ENOSPC;

		credits = max(TB_MIN_PCIE_CREDITS, credits);
	} else {
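		/* Legacy routers have fixed buffer allocations */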
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
	return 0;
}

static int tb_pci_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_pci_init_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
		goto err_free;

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
		goto err_deactivate;

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
			      int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
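	/* e.g. HBR2 x2: 5400 * 2 * 8 / 10 = 8640 Mb/s */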
	return rate * lanes * 8 / 10;
}

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out, 1500);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

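	/*
	 * If the DP OUT adapter sits deeper in the topology than the DP
	 * IN, the stream flows downstream so the downstream limit
	 * applies (and vice versa).
	 */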
	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	/*
	 * Titan Ridge does not disable AUX timers when it gets
	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
	 * DP tunneling.
	 */
	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
		tb_port_dbg(out, "disabling LTTPR\n");
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
{
	int ret, estimated_bw, granularity, tmp;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	u32 out_dp_cap, out_rate, out_lanes;
	u32 in_dp_cap, in_rate, in_lanes;
	u32 rate, lanes;

	if (!bw_alloc_mode)
		return 0;

	ret = usb4_dp_port_set_cm_bw_mode_supported(in, true);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_group_id(in, in->group->index);
	if (ret)
		return ret;

	/*
	 * Get the non-reduced rate and lanes based on the lowest
	 * capability of both adapters.
	 */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);

	rate = min(in_rate, out_rate);
	lanes = min(in_lanes, out_lanes);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
		    lanes, tmp);

	ret = usb4_dp_port_set_nrd(in, rate, lanes);
	if (ret)
		return ret;

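	/*
	 * Pick the smallest granularity, starting from 250 Mb/s and
	 * doubling, that still lets the non-reduced bandwidth fit the
	 * 8-bit bandwidth fields.
	 */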
	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
	     granularity *= 2)
		;

	tb_port_dbg(in, "granularity %d Mb/s\n", granularity);

	/*
	 * Returns -EINVAL if granularity above is outside of the
	 * accepted ranges.
	 */
	ret = usb4_dp_port_set_granularity(in, granularity);
	if (ret)
		return ret;

	/*
	 * Bandwidth estimation is pretty much what we have in
	 * max_up/down fields. For discovery we just read what the
	 * estimation was set to.
	 */
	if (in->sw->config.depth < out->sw->config.depth)
		estimated_bw = tunnel->max_down;
	else
		estimated_bw = tunnel->max_up;

	tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);

	ret = usb4_dp_port_set_estimated_bw(in, estimated_bw);
	if (ret)
		return ret;

	/* Initial allocation should be 0 according to the spec */
	ret = usb4_dp_port_allocate_bw(in, 0);
	if (ret)
		return ret;

	tb_port_dbg(in, "bandwidth allocation mode enabled\n");
	return 0;
}

static int tb_dp_init(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;
	struct tb_switch *sw = in->sw;
	struct tb *tb = in->sw->tb;
	int ret;

	ret = tb_dp_xchg_caps(tunnel);
	if (ret)
		return ret;

	if (!tb_switch_is_usb4(sw))
		return 0;

	if (!usb4_dp_port_bw_mode_supported(in))
		return 0;

	tb_port_dbg(in, "bandwidth allocation mode supported\n");

	ret = usb4_dp_port_set_cm_id(in, tb->index);
	if (ret)
		return ret;

	return tb_dp_bw_alloc_mode_enable(tunnel);
}

static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;

	if (!usb4_dp_port_bw_mode_supported(in))
		return;
	if (usb4_dp_port_bw_mode_enabled(in)) {
		usb4_dp_port_set_cm_bw_mode_supported(in, false);
		tb_port_dbg(in, "bandwidth allocation mode disabled\n");
	}
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

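		/* Program the HopIDs of all three paths to both adapters */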
		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

/* max_bw is rounded up to the next granularity step */
static int tb_dp_nrd_bandwidth(struct tb_tunnel *tunnel, int *max_bw)
{
	struct tb_port *in = tunnel->src_port;
	int ret, rate, lanes, nrd_bw;

	ret = usb4_dp_port_nrd(in, &rate, &lanes);
	if (ret)
		return ret;

	nrd_bw = tb_dp_bandwidth(rate, lanes);

	if (max_bw) {
		ret = usb4_dp_port_granularity(in);
		if (ret < 0)
			return ret;
		*max_bw = roundup(nrd_bw, ret);
	}

	return nrd_bw;
}

static int tb_dp_bw_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
					    int *consumed_up, int *consumed_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, allocated_bw, max_bw;

	if (!usb4_dp_port_bw_mode_enabled(in))
		return -EOPNOTSUPP;

	if (!tunnel->bw_mode)
		return -EOPNOTSUPP;

	/* Read what was allocated previously if any */
	ret = usb4_dp_port_allocated_bw(in);
	if (ret < 0)
		return ret;
	allocated_bw = ret;

	ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;
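	/*
	 * If the allocation matches the rounded-up NRD bandwidth,
	 * report the exact (non-rounded) NRD bandwidth instead.
	 */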
	if (allocated_bw == max_bw)
		allocated_bw = ret;

	tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
		    allocated_bw);

	if (in->sw->config.depth < out->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = allocated_bw;
	} else {
		*consumed_up = allocated_bw;
		*consumed_down = 0;
	}

	return 0;
}

static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				     int *allocated_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;

	/*
	 * If we have already set the allocated bandwidth then use that.
	 * Otherwise we read it from the DPRX.
	 */
	if (usb4_dp_port_bw_mode_enabled(in) && tunnel->bw_mode) {
		int ret, allocated_bw, max_bw;

		ret = usb4_dp_port_allocated_bw(in);
		if (ret < 0)
			return ret;
		allocated_bw = ret;

		ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
		if (ret < 0)
			return ret;
		if (allocated_bw == max_bw)
			allocated_bw = ret;

		if (in->sw->config.depth < out->sw->config.depth) {
			*allocated_up = 0;
			*allocated_down = allocated_bw;
		} else {
			*allocated_up = allocated_bw;
			*allocated_down = 0;
		}
		return 0;
	}

	return tunnel->consumed_bandwidth(tunnel, allocated_up,
					  allocated_down);
}

static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
				 int *alloc_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int max_bw, ret, tmp;

	if (!usb4_dp_port_bw_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;

	if (in->sw->config.depth < out->sw->config.depth) {
		tmp = min(*alloc_down, max_bw);
		ret = usb4_dp_port_allocate_bw(in, tmp);
		if (ret)
			return ret;
		*alloc_down = tmp;
		*alloc_up = 0;
	} else {
		tmp = min(*alloc_up, max_bw);
		ret = usb4_dp_port_allocate_bw(in, tmp);
		if (ret)
			return ret;
		*alloc_down = 0;
		*alloc_up = tmp;
	}

	/* Now we can use BW mode registers to figure out the bandwidth */
	/* TODO: need to handle discovery too */
	tunnel->bw_mode = true;
	return 0;
}

static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
			   int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	struct tb_port *in = tunnel->src_port;

	/*
	 * Wait for DPRX done. Normally it should be already set for
	 * active tunnel.
	 */
	do {
		u32 val;
		int ret;

		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_COMMON_CAP, 1);
		if (ret)
			return ret;

		if (val & DP_COMMON_CAP_DPRX_DONE) {
			*rate = tb_dp_cap_get_rate(val);
			*lanes = tb_dp_cap_get_lanes(val);

			tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
				    tb_dp_bandwidth(*rate, *lanes));
			return 0;
		}
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
			  u32 *lanes)
{
	struct tb_port *in = tunnel->src_port;
	u32 val;
	int ret;

	switch (cap) {
	case DP_LOCAL_CAP:
	case DP_REMOTE_CAP:
		break;

	default:
		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
		return -EINVAL;
	}

	/*
	 * Read from the copied remote cap so that we take into account
	 * if capabilities were reduced during exchange.
	 */
	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
	if (ret)
		return ret;

	*rate = tb_dp_cap_get_rate(val);
	*lanes = tb_dp_cap_get_lanes(val);

	tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
		    tb_dp_bandwidth(*rate, *lanes));
	return 0;
}

static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				   int *max_down)
{
	struct tb_port *in = tunnel->src_port;
	u32 rate, lanes;
	int ret;

	/*
	 * The DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
	 * read parameter values, so we can use it to determine the
	 * maximum possible bandwidth over this link.
	 */
	ret = tb_dp_read_cap(tunnel, DP_LOCAL_CAP, &rate, &lanes);
	if (ret)
		return ret;

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*max_up = 0;
		*max_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*max_up = tb_dp_bandwidth(rate, lanes);
		*max_down = 0;
	}

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		/*
		 * On USB4 routers check if the bandwidth allocation
		 * mode is enabled first and then read the bandwidth
		 * through those registers.
		 */
		ret = tb_dp_bw_mode_consumed_bandwidth(tunnel, consumed_up,
						       consumed_down);
		if (ret < 0) {
			if (ret != -EOPNOTSUPP)
				return ret;
		} else if (!ret) {
			return 0;
		}
		/*
		 * Then see if the DPRX negotiation is ready and if yes
		 * return that bandwidth (it may be smaller than the
		 * reduced one). Otherwise return the remote (possibly
		 * reduced) caps.
		 */
		ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
		if (ret) {
			if (ret == -ETIMEDOUT)
				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
						     &rate, &lanes);
			if (ret)
				return ret;
		}
	} else if (sw->generation >= 2) {
		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
		if (ret)
			return ret;
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port))
		hop->initial_credits = sw->min_dp_aux_credits;
	else
		hop->initial_credits = 1;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	tb_path_for_each_hop(path, hop)
		tb_dp_init_aux_credits(hop);
}

static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int nfc_credits;
		size_t max_dp_streams;

		tb_available_credits(port, &max_dp_streams);
		/*
		 * Read the number of currently allocated NFC credits
		 * from the lane adapter. Since we only use them for DP
		 * tunneling we can use that to figure out how many DP
		 * tunnels already go through the lane adapter.
		 */
		nfc_credits = port->config.nfc_credits &
				ADP_CS_4_NFC_BUFFERS_MASK;
		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
			return -ENOSPC;

		hop->nfc_credits = sw->min_dp_main_credits;
	} else {
		hop->nfc_credits = min(port->total_credits - 2, 12U);
	}

	return 0;
}

static int tb_dp_init_video_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dp_init_video_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video", alloc_hopid);
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
		goto err_free;

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
				alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @link_nr: Preferred lane adapter when the link is not bonded
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     link_nr, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
	const struct tb_switch *sw = port->sw;
	int credits;

	credits = tb_available_credits(port, NULL);
	if (tb_acpi_may_tunnel_pcie())
		credits -= sw->max_pcie_credits;
	credits -= port->dma_credits;

	return credits > 0 ? credits : 0;
}

static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available = tb_dma_available_credits(port);

		/*
		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
		 * DMA path cannot be established.
		 */
		if (available < TB_MIN_DMA_CREDITS)
			return -ENOSPC;

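		/* Clamp the request to what is actually available */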
		while (credits > available)
			credits--;

		tb_port_dbg(port, "reserving %u credits for DMA path\n",
			    credits);

		port->dma_credits += credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 14 : 6;
		else
			credits = min(port->total_credits, credits);
	}

	hop->initial_credits = credits;
	return 0;
}

/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;
	unsigned int i, tmp;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	/*
	 * First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so we can use
	 * all the credits (except the ones reserved for control
	 * traffic).
	 */
	hop = &path->hops[0];
	tmp = min(tb_usable_credits(hop->in_port), credits);
	hop->initial_credits = tmp;
	hop->in_port->dma_credits += tmp;

	for (i = 1; i < path->path_length; i++) {
		int ret;

		ret = tb_dma_reserve_credits(&path->hops[i], credits);
		if (ret)
			return ret;
	}

	return 0;
}

/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_ALL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dma_reserve_credits(hop, credits);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_dma_release_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		port->dma_credits -= hop->initial_credits;

		tb_port_dbg(port, "released %u DMA path credits\n",
			    hop->initial_credits);
	}
}

static void tb_dma_deinit_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	tb_path_for_each_hop(path, hop)
		tb_dma_release_credits(hop);
}

static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		tb_dma_deinit_path(tunnel->paths[i]);
	}
}

1431 | /** | |
1432 | * tb_tunnel_alloc_dma() - allocate a DMA tunnel | |
1433 | * @tb: Pointer to the domain structure | |
1434 | * @nhi: Host controller port | |
1435 | * @dst: Destination null port which the other domain is connected to | |
44242d6c | 1436 | * @transmit_path: HopID used for transmitting packets |
180b0689 MW |
1437 | * @transmit_ring: NHI ring number used to send packets towards the |
1438 | * other domain. Set to %-1 if TX path is not needed. | |
a27ea0df | 1439 | * @receive_path: HopID used for receiving packets |
180b0689 MW |
1440 | * @receive_ring: NHI ring number used to receive packets from the |
1441 | * other domain. Set to %-1 if RX path is not needed. | |
44242d6c MW |
1442 | * |
1443 | * Return: Returns a tb_tunnel on success or NULL on failure. | |
1444 | */ | |
1445 | struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, | |
180b0689 MW |
1446 | struct tb_port *dst, int transmit_path, |
1447 | int transmit_ring, int receive_path, | |
1448 | int receive_ring) | |
44242d6c MW |
1449 | { |
1450 | struct tb_tunnel *tunnel; | |
5bf722df | 1451 | size_t npaths = 0, i = 0; |
44242d6c | 1452 | struct tb_path *path; |
6ed541c5 | 1453 | int credits; |
44242d6c | 1454 | |
180b0689 | 1455 | if (receive_ring > 0) |
5bf722df | 1456 | npaths++; |
180b0689 | 1457 | if (transmit_ring > 0) |
5bf722df MW |
1458 | npaths++; |
1459 | ||
1460 | if (WARN_ON(!npaths)) | |
1461 | return NULL; | |
1462 | ||
1463 | tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA); | |
44242d6c MW |
1464 | if (!tunnel) |
1465 | return NULL; | |
1466 | ||
44242d6c MW |
1467 | tunnel->src_port = nhi; |
1468 | tunnel->dst_port = dst; | |
6ed541c5 | 1469 | tunnel->deinit = tb_dma_deinit; |
44242d6c | 1470 | |
6ed541c5 | 1471 | credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits); |
44242d6c | 1472 | |
180b0689 | 1473 | if (receive_ring > 0) { |
5bf722df MW |
1474 | path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, |
1475 | "DMA RX"); | |
6ed541c5 MW |
1476 | if (!path) |
1477 | goto err_free; | |
5bf722df | 1478 | tunnel->paths[i++] = path; |
6ed541c5 MW |
1479 | if (tb_dma_init_rx_path(path, credits)) { |
1480 | tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n"); | |
1481 | goto err_free; | |
1482 | } | |
44242d6c | 1483 | } |
44242d6c | 1484 | |
180b0689 | 1485 | if (transmit_ring > 0) { |
5bf722df MW |
1486 | path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, |
1487 | "DMA TX"); | |
6ed541c5 MW |
1488 | if (!path) |
1489 | goto err_free; | |
5bf722df | 1490 | tunnel->paths[i++] = path; |
6ed541c5 MW |
1491 | if (tb_dma_init_tx_path(path, credits)) { |
1492 | tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n"); | |
1493 | goto err_free; | |
1494 | } | |
44242d6c | 1495 | } |
44242d6c MW |
1496 | |
1497 | return tunnel; | |
6ed541c5 MW |
1498 | |
1499 | err_free: | |
1500 | tb_tunnel_free(tunnel); | |
1501 | return NULL; | |
44242d6c MW |
1502 | } |
1503 | ||
/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Pass %-1 to ignore.
 *
 * This function can be used to match a specific DMA tunnel, if there
 * are multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring)
{
	const struct tb_path *tx_path = NULL, *rx_path = NULL;
	int i;

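	/* Zero is not a valid ring number here; %-1 is used to ignore */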
	if (!receive_ring || !transmit_ring)
		return false;

	for (i = 0; i < tunnel->npaths; i++) {
		const struct tb_path *path = tunnel->paths[i];

		if (!path)
			continue;

		if (tb_port_is_nhi(path->hops[0].in_port))
			tx_path = path;
		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
			rx_path = path;
	}

	if (transmit_ring > 0 || transmit_path > 0) {
		if (!tx_path)
			return false;
		if (transmit_ring > 0 &&
		    (tx_path->hops[0].in_hop_index != transmit_ring))
			return false;
		if (transmit_path > 0 &&
		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
			return false;
	}

	if (receive_ring > 0 || receive_path > 0) {
		if (!rx_path)
			return false;
		if (receive_path > 0 &&
		    (rx_path->hops[0].in_hop_index != receive_path))
			return false;
		if (receive_ring > 0 &&
		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
			return false;
	}

	return true;
}

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

0bd680cd MW |
1605 | static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel, |
1606 | int *consumed_up, int *consumed_down) | |
1607 | { | |
c6da62a2 MW |
1608 | int pcie_enabled = tb_acpi_may_tunnel_pcie(); |
1609 | ||
0bd680cd | 1610 | /* |
c6da62a2 MW |
1611 | * PCIe tunneling, if enabled, affects the USB3 bandwidth so |
1612 | * take that into account here. | |
0bd680cd | 1613 | */ |
c6da62a2 MW |
1614 | *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3; |
1615 | *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3; | |
0bd680cd MW |
1616 | return 0; |
1617 | } | |
1618 | ||
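/*
 * Worked example for the scaling in tb_usb3_consumed_bandwidth() above
 * (editor's note): with 900 Mb/s allocated in each direction and PCIe
 * tunneling enabled, the reported consumption is 900 * (3 + 1) / 3 =
 * 1200 Mb/s, while with PCIe tunneling disabled it stays at
 * 900 * 3 / 3 = 900 Mb/s.
 */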
1619 | static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel) | |
1620 | { | |
1621 | int ret; | |
1622 | ||
1623 | ret = usb4_usb3_port_release_bandwidth(tunnel->src_port, | |
1624 | &tunnel->allocated_up, | |
1625 | &tunnel->allocated_down); | |
1626 | if (ret) | |
1627 | return ret; | |
1628 | ||
1629 | tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n", | |
1630 | tunnel->allocated_up, tunnel->allocated_down); | |
1631 | return 0; | |
1632 | } | |
1633 | ||
1634 | static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel, | |
1635 | int *available_up, | |
1636 | int *available_down) | |
1637 | { | |
1638 | int ret, max_rate, allocate_up, allocate_down; | |
1639 | ||
1640 | ret = usb4_usb3_port_actual_link_rate(tunnel->src_port); | |
813050e0 MW |
1641 | if (ret < 0) { |
1642 | tb_tunnel_warn(tunnel, "failed to read actual link rate\n"); | |
0bd680cd | 1643 | return; |
813050e0 MW |
1644 | } else if (!ret) { |
1645 | /* Use the maximum link rate if the link valid bit is not set */ | |
e8ff07fb | 1646 | ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port); |
813050e0 MW |
1647 | if (ret < 0) { |
1648 | tb_tunnel_warn(tunnel, "failed to read maximum link rate\n"); | |
1649 | return; | |
1650 | } | |
0bd680cd | 1651 | } |
813050e0 | 1652 | |
0bd680cd MW |
1653 | /* |
1654 | * 90% of the max rate can be allocated for isochronous | |
1655 | * transfers. | |
1656 | */ | |
1657 | max_rate = ret * 90 / 100; | |
1658 | ||
1659 | /* No need to reclaim if already at maximum */ | |
1660 | if (tunnel->allocated_up >= max_rate && | |
1661 | tunnel->allocated_down >= max_rate) | |
1662 | return; | |
1663 | ||
1664 | /* Don't go lower than what is already allocated */ | |
1665 | allocate_up = min(max_rate, *available_up); | |
1666 | if (allocate_up < tunnel->allocated_up) | |
1667 | allocate_up = tunnel->allocated_up; | |
1668 | ||
1669 | allocate_down = min(max_rate, *available_down); | |
1670 | if (allocate_down < tunnel->allocated_down) | |
1671 | allocate_down = tunnel->allocated_down; | |
1672 | ||
1673 | /* Nothing to do if there are no changes */ | |
1674 | if (allocate_up == tunnel->allocated_up && | |
1675 | allocate_down == tunnel->allocated_down) | |
1676 | return; | |
1677 | ||
1678 | ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up, | |
1679 | &allocate_down); | |
1680 | if (ret) { | |
1681 | tb_tunnel_info(tunnel, "failed to allocate bandwidth\n"); | |
1682 | return; | |
1683 | } | |
1684 | ||
1685 | tunnel->allocated_up = allocate_up; | |
1686 | *available_up -= tunnel->allocated_up; | |
1687 | ||
1688 | tunnel->allocated_down = allocate_down; | |
1689 | *available_down -= tunnel->allocated_down; | |
1690 | ||
1691 | tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n", | |
1692 | tunnel->allocated_up, tunnel->allocated_down); | |
1693 | } | |
1694 | ||
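/*
 * Numeric sketch of the reclaim logic above (editor's example): on a
 * 10000 Mb/s link, max_rate is 9000 Mb/s. If the tunnel currently has
 * 2000/2000 Mb/s allocated and 4000/6000 Mb/s are still available, it
 * requests min(9000, 4000) = 4000 Mb/s up and min(9000, 6000) = 6000
 * Mb/s down. On success the allocations are raised accordingly and both
 * *available_up and *available_down drop to 0.
 */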
6ed541c5 MW |
1695 | static void tb_usb3_init_credits(struct tb_path_hop *hop) |
1696 | { | |
1697 | struct tb_port *port = hop->in_port; | |
1698 | struct tb_switch *sw = port->sw; | |
1699 | unsigned int credits; | |
1700 | ||
1701 | if (tb_port_use_credit_allocation(port)) { | |
1702 | credits = sw->max_usb3_credits; | |
1703 | } else { | |
1704 | if (tb_port_is_null(port)) | |
1705 | credits = port->bonded ? 32 : 16; | |
1706 | else | |
1707 | credits = 7; | |
1708 | } | |
1709 | ||
1710 | hop->initial_credits = credits; | |
1711 | } | |
1712 | ||
e6f81858 RM |
1713 | static void tb_usb3_init_path(struct tb_path *path) |
1714 | { | |
6ed541c5 MW |
1715 | struct tb_path_hop *hop; |
1716 | ||
e6f81858 RM |
1717 | path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; |
1718 | path->egress_shared_buffer = TB_PATH_NONE; | |
1719 | path->ingress_fc_enable = TB_PATH_ALL; | |
1720 | path->ingress_shared_buffer = TB_PATH_NONE; | |
1721 | path->priority = 3; | |
1722 | path->weight = 3; | |
1723 | path->drop_packages = 0; | |
6ed541c5 MW |
1724 | |
1725 | tb_path_for_each_hop(path, hop) | |
1726 | tb_usb3_init_credits(hop); | |
e6f81858 RM |
1727 | } |
1728 | ||
1729 | /** | |
1730 | * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels | |
1731 | * @tb: Pointer to the domain structure | |
1732 | * @down: USB3 downstream adapter | |
43bddb26 | 1733 | * @alloc_hopid: Allocate HopIDs from visited ports |
e6f81858 RM |
1734 | * |
1735 | * If @down adapter is active, follows the tunnel to the USB3 upstream | |
1736 | * adapter and back. Returns the discovered tunnel or %NULL if there was | |
1737 | * no tunnel. | |
1738 | */ | |
43bddb26 MW |
1739 | struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down, |
1740 | bool alloc_hopid) | |
e6f81858 RM |
1741 | { |
1742 | struct tb_tunnel *tunnel; | |
1743 | struct tb_path *path; | |
1744 | ||
1745 | if (!tb_usb3_port_is_enabled(down)) | |
1746 | return NULL; | |
1747 | ||
1748 | tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3); | |
1749 | if (!tunnel) | |
1750 | return NULL; | |
1751 | ||
1752 | tunnel->activate = tb_usb3_activate; | |
1753 | tunnel->src_port = down; | |
1754 | ||
1755 | /* | |
1756 | * Discover both paths even if they are not complete. We will | |
1757 | * clean them up by calling tb_tunnel_deactivate() below in that | |
1758 | * case. | |
1759 | */ | |
1760 | path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1, | |
43bddb26 | 1761 | &tunnel->dst_port, "USB3 Down", alloc_hopid); |
e6f81858 RM |
1762 | if (!path) { |
1763 | /* Just disable the downstream port */ | |
1764 | tb_usb3_port_enable(down, false); | |
1765 | goto err_free; | |
1766 | } | |
783735f8 MW |
1767 | tunnel->paths[TB_USB3_PATH_DOWN] = path; |
1768 | tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); | |
e6f81858 RM |
1769 | |
1770 | path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, | |
43bddb26 | 1771 | "USB3 Up", alloc_hopid); |
e6f81858 RM |
1772 | if (!path) |
1773 | goto err_deactivate; | |
783735f8 MW |
1774 | tunnel->paths[TB_USB3_PATH_UP] = path; |
1775 | tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); | |
e6f81858 RM |
1776 | |
1777 | /* Validate that the tunnel is complete */ | |
1778 | if (!tb_port_is_usb3_up(tunnel->dst_port)) { | |
1779 | tb_port_warn(tunnel->dst_port, | |
1780 | "path does not end on an USB3 adapter, cleaning up\n"); | |
1781 | goto err_deactivate; | |
1782 | } | |
1783 | ||
1784 | if (down != tunnel->src_port) { | |
1785 | tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); | |
1786 | goto err_deactivate; | |
1787 | } | |
1788 | ||
1789 | if (!tb_usb3_port_is_enabled(tunnel->dst_port)) { | |
1790 | tb_tunnel_warn(tunnel, | |
1791 | "tunnel is not fully activated, cleaning up\n"); | |
1792 | goto err_deactivate; | |
1793 | } | |
1794 | ||
0bd680cd MW |
1795 | if (!tb_route(down->sw)) { |
1796 | int ret; | |
1797 | ||
1798 | /* | |
1799 | * Read the initial bandwidth allocation for the first | |
1800 | * hop tunnel. | |
1801 | */ | |
1802 | ret = usb4_usb3_port_allocated_bandwidth(down, | |
1803 | &tunnel->allocated_up, &tunnel->allocated_down); | |
1804 | if (ret) | |
1805 | goto err_deactivate; | |
1806 | ||
1807 | tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n", | |
1808 | tunnel->allocated_up, tunnel->allocated_down); | |
1809 | ||
1810 | tunnel->init = tb_usb3_init; | |
1811 | tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; | |
1812 | tunnel->release_unused_bandwidth = | |
1813 | tb_usb3_release_unused_bandwidth; | |
1814 | tunnel->reclaim_available_bandwidth = | |
1815 | tb_usb3_reclaim_available_bandwidth; | |
1816 | } | |
1817 | ||
e6f81858 RM |
1818 | tb_tunnel_dbg(tunnel, "discovered\n"); |
1819 | return tunnel; | |
1820 | ||
1821 | err_deactivate: | |
1822 | tb_tunnel_deactivate(tunnel); | |
1823 | err_free: | |
1824 | tb_tunnel_free(tunnel); | |
1825 | ||
1826 | return NULL; | |
1827 | } | |
1828 | ||
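/*
 * Illustrative caller sketch (editor's example, simplified from what a
 * connection manager would do): walk the downstream USB3 adapters of a
 * switch and pick up any tunnels the boot firmware may have created,
 * collecting them on @list through tb_tunnel->list.
 */
static void example_discover_usb3_tunnels(struct tb *tb, struct tb_switch *sw,
					  struct list_head *list)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel;

		if (!tb_port_is_usb3_down(port))
			continue;

		tunnel = tb_tunnel_discover_usb3(tb, port, true);
		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}
}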
1829 | /** | |
1830 | * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel | |
1831 | * @tb: Pointer to the domain structure | |
1832 | * @up: USB3 upstream adapter port | |
1833 | * @down: USB3 downstream adapter port | |
0bd680cd MW |
1834 | * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0 |
1835 | * if not limited). | |
1836 | * @max_down: Maximum available downstream bandwidth for the USB3 tunnel | |
1837 | * (%0 if not limited). | |
e6f81858 RM |
1838 | * |
1839 | * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and | |
1840 | * @TB_TYPE_USB3_DOWN. | |
1841 | * | |
1842 | * Return: Returns a tb_tunnel on success or %NULL on failure. | |
1843 | */ | |
1844 | struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, | |
0bd680cd MW |
1845 | struct tb_port *down, int max_up, |
1846 | int max_down) | |
e6f81858 RM |
1847 | { |
1848 | struct tb_tunnel *tunnel; | |
1849 | struct tb_path *path; | |
0bd680cd MW |
1850 | int max_rate = 0; |
1851 | ||
1852 | /* | |
1853 | * Check that we have enough bandwidth available for the new | |
1854 | * USB3 tunnel. | |
1855 | */ | |
1856 | if (max_up > 0 || max_down > 0) { | |
1857 | max_rate = tb_usb3_max_link_rate(down, up); | |
1858 | if (max_rate < 0) | |
1859 | return NULL; | |
1860 | ||
1861 | /* Only 90% can be allocated for USB3 isochronous transfers */ | |
1862 | max_rate = max_rate * 90 / 100; | |
1863 | tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n", | |
1864 | max_rate); | |
1865 | ||
1866 | if (max_rate > max_up || max_rate > max_down) { | |
1867 | tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n"); | |
1868 | return NULL; | |
1869 | } | |
1870 | } | |
e6f81858 RM |
1871 | |
1872 | tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3); | |
1873 | if (!tunnel) | |
1874 | return NULL; | |
1875 | ||
1876 | tunnel->activate = tb_usb3_activate; | |
1877 | tunnel->src_port = down; | |
1878 | tunnel->dst_port = up; | |
0bd680cd MW |
1879 | tunnel->max_up = max_up; |
1880 | tunnel->max_down = max_down; | |
e6f81858 RM |
1881 | |
1882 | path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0, | |
1883 | "USB3 Down"); | |
1884 | if (!path) { | |
1885 | tb_tunnel_free(tunnel); | |
1886 | return NULL; | |
1887 | } | |
1888 | tb_usb3_init_path(path); | |
1889 | tunnel->paths[TB_USB3_PATH_DOWN] = path; | |
1890 | ||
1891 | path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0, | |
1892 | "USB3 Up"); | |
1893 | if (!path) { | |
1894 | tb_tunnel_free(tunnel); | |
1895 | return NULL; | |
1896 | } | |
1897 | tb_usb3_init_path(path); | |
1898 | tunnel->paths[TB_USB3_PATH_UP] = path; | |
1899 | ||
0bd680cd MW |
1900 | if (!tb_route(down->sw)) { |
1901 | tunnel->allocated_up = max_rate; | |
1902 | tunnel->allocated_down = max_rate; | |
1903 | ||
1904 | tunnel->init = tb_usb3_init; | |
1905 | tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; | |
1906 | tunnel->release_unused_bandwidth = | |
1907 | tb_usb3_release_unused_bandwidth; | |
1908 | tunnel->reclaim_available_bandwidth = | |
1909 | tb_usb3_reclaim_available_bandwidth; | |
1910 | } | |
1911 | ||
e6f81858 RM |
1912 | return tunnel; |
1913 | } | |
1914 | ||
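/*
 * Illustrative sketch (editor's example): allocate and activate a USB3
 * tunnel in one go, capped to the currently available bandwidth. Error
 * handling is simplified; a real caller would also track the returned
 * tunnel on a list.
 */
static struct tb_tunnel *example_usb3_tunnel_up(struct tb *tb,
						struct tb_port *up,
						struct tb_port *down,
						int available_up,
						int available_down)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel)
		return NULL;

	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	return tunnel;
}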
3364f0c1 | 1915 | /** |
93f36ade MW |
1916 | * tb_tunnel_free() - free a tunnel |
1917 | * @tunnel: Tunnel to be freed | |
3364f0c1 | 1918 | * |
ab9f31cf | 1919 | * Frees a tunnel. The tunnel does not need to be deactivated. |
3364f0c1 | 1920 | */ |
93f36ade | 1921 | void tb_tunnel_free(struct tb_tunnel *tunnel) |
3364f0c1 | 1922 | { |
93f36ade MW |
1923 | int i; |
1924 | ||
1925 | if (!tunnel) | |
3364f0c1 | 1926 | return; |
93f36ade | 1927 | |
6ed541c5 MW |
1928 | if (tunnel->deinit) |
1929 | tunnel->deinit(tunnel); | |
1930 | ||
93f36ade MW |
1931 | for (i = 0; i < tunnel->npaths; i++) { |
1932 | if (tunnel->paths[i]) | |
1933 | tb_path_free(tunnel->paths[i]); | |
1934 | } | |
1935 | ||
1936 | kfree(tunnel->paths); | |
3364f0c1 AN |
1937 | kfree(tunnel); |
1938 | } | |
1939 | ||
1940 | /** | |
93f36ade MW |
1941 | * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid | |
1942 | * @tunnel: Tunnel to check | |
3364f0c1 | 1943 | */ |
93f36ade | 1944 | bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel) |
3364f0c1 | 1945 | { |
93f36ade | 1946 | int i; |
3364f0c1 | 1947 | |
93f36ade MW |
1948 | for (i = 0; i < tunnel->npaths; i++) { |
1949 | WARN_ON(!tunnel->paths[i]->activated); | |
1950 | if (tb_path_is_invalid(tunnel->paths[i])) | |
1951 | return true; | |
1952 | } | |
3364f0c1 | 1953 | |
93f36ade | 1954 | return false; |
3364f0c1 AN |
1955 | } |
1956 | ||
1957 | /** | |
93f36ade MW |
1958 | * tb_tunnel_restart() - activate a tunnel after a hardware reset |
1959 | * @tunnel: Tunnel to restart | |
1960 | * | |
1961 | * Return: 0 on success and negative errno in case of failure | |
3364f0c1 | 1962 | */ |
93f36ade | 1963 | int tb_tunnel_restart(struct tb_tunnel *tunnel) |
3364f0c1 | 1964 | { |
93f36ade | 1965 | int res, i; |
3364f0c1 | 1966 | |
62efe699 | 1967 | tb_tunnel_dbg(tunnel, "activating\n"); |
3364f0c1 | 1968 | |
aae9e27f MW |
1969 | /* |
1970 | * Make sure all paths are properly disabled before enabling | |
1971 | * them again. | |
1972 | */ | |
1973 | for (i = 0; i < tunnel->npaths; i++) { | |
1974 | if (tunnel->paths[i]->activated) { | |
1975 | tb_path_deactivate(tunnel->paths[i]); | |
1976 | tunnel->paths[i]->activated = false; | |
1977 | } | |
1978 | } | |
1979 | ||
4f807e47 MW |
1980 | if (tunnel->init) { |
1981 | res = tunnel->init(tunnel); | |
1982 | if (res) | |
1983 | return res; | |
1984 | } | |
1985 | ||
93f36ade | 1986 | for (i = 0; i < tunnel->npaths; i++) { |
93f36ade MW |
1987 | res = tb_path_activate(tunnel->paths[i]); |
1988 | if (res) | |
1989 | goto err; | |
1990 | } | |
3364f0c1 | 1991 | |
93f36ade MW |
1992 | if (tunnel->activate) { |
1993 | res = tunnel->activate(tunnel, true); | |
1994 | if (res) | |
1995 | goto err; | |
1996 | } | |
3364f0c1 | 1997 | |
3364f0c1 | 1998 | return 0; |
93f36ade | 1999 | |
3364f0c1 AN |
2000 | err: |
2001 | tb_tunnel_warn(tunnel, "activation failed\n"); | |
93f36ade | 2002 | tb_tunnel_deactivate(tunnel); |
3364f0c1 AN |
2003 | return res; |
2004 | } | |
2005 | ||
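/*
 * Illustrative sketch (editor's example): re-establish all tunnels on a
 * list after a hardware reset using tb_tunnel_restart() above. Failed
 * tunnels are left deactivated (tb_tunnel_restart() deactivates on
 * error itself).
 */
static void example_restart_tunnels(struct list_head *tunnels)
{
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, tunnels, list) {
		if (tb_tunnel_restart(tunnel))
			tb_tunnel_warn(tunnel, "restart failed, skipping\n");
	}
}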
2006 | /** | |
93f36ade MW |
2007 | * tb_tunnel_activate() - activate a tunnel |
2008 | * @tunnel: Tunnel to activate | |
3364f0c1 AN |
2009 | * |
2010 | * Return: Returns 0 on success or an error code on failure. | |
2011 | */ | |
93f36ade | 2012 | int tb_tunnel_activate(struct tb_tunnel *tunnel) |
3364f0c1 | 2013 | { |
93f36ade | 2014 | int i; |
3364f0c1 | 2015 | |
93f36ade MW |
2016 | for (i = 0; i < tunnel->npaths; i++) { |
2017 | if (tunnel->paths[i]->activated) { | |
2018 | tb_tunnel_WARN(tunnel, | |
2019 | "trying to activate an already activated tunnel\n"); | |
2020 | return -EINVAL; | |
2021 | } | |
2022 | } | |
3364f0c1 | 2023 | |
93f36ade MW |
2024 | return tb_tunnel_restart(tunnel); |
2025 | } | |
3364f0c1 AN |
2026 | |
2027 | /** | |
93f36ade MW |
2028 | * tb_tunnel_deactivate() - deactivate a tunnel |
2029 | * @tunnel: Tunnel to deactivate | |
3364f0c1 | 2030 | */ |
93f36ade | 2031 | void tb_tunnel_deactivate(struct tb_tunnel *tunnel) |
3364f0c1 | 2032 | { |
93f36ade MW |
2033 | int i; |
2034 | ||
62efe699 | 2035 | tb_tunnel_dbg(tunnel, "deactivating\n"); |
3364f0c1 | 2036 | |
93f36ade MW |
2037 | if (tunnel->activate) |
2038 | tunnel->activate(tunnel, false); | |
2039 | ||
2040 | for (i = 0; i < tunnel->npaths; i++) { | |
0414bec5 | 2041 | if (tunnel->paths[i] && tunnel->paths[i]->activated) |
93f36ade MW |
2042 | tb_path_deactivate(tunnel->paths[i]); |
2043 | } | |
2044 | } | |
a11b88ad MW |
2045 | |
2046 | /** | |
0bd680cd | 2047 | * tb_tunnel_port_on_path() - Does the tunnel go through port |
a11b88ad | 2048 | * @tunnel: Tunnel to check |
0bd680cd | 2049 | * @port: Port to check |
a11b88ad | 2050 | * |
0bd680cd | 2051 | * Returns true if @tunnel goes through @port (direction does not matter), |
a11b88ad MW |
2052 | * false otherwise. |
2053 | */ | |
0bd680cd MW |
2054 | bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel, |
2055 | const struct tb_port *port) | |
a11b88ad MW |
2056 | { |
2057 | int i; | |
2058 | ||
2059 | for (i = 0; i < tunnel->npaths; i++) { | |
2060 | if (!tunnel->paths[i]) | |
2061 | continue; | |
0bd680cd MW |
2062 | |
2063 | if (tb_path_port_on_path(tunnel->paths[i], port)) | |
a11b88ad MW |
2064 | return true; |
2065 | } | |
2066 | ||
2067 | return false; | |
2068 | } | |
2069 | ||
2070 | static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel) | |
2071 | { | |
2072 | int i; | |
2073 | ||
2074 | for (i = 0; i < tunnel->npaths; i++) { | |
2075 | if (!tunnel->paths[i]) | |
2076 | return false; | |
2077 | if (!tunnel->paths[i]->activated) | |
2078 | return false; | |
2079 | } | |
2080 | ||
2081 | return true; | |
2082 | } | |
2083 | ||
06cbcbfa MW |
2084 | /** |
2085 | * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth | |
2086 | * @tunnel: Tunnel to check | |
2087 | * @max_up: Maximum upstream bandwidth in Mb/s | |
2088 | * @max_down: Maximum downstream bandwidth in Mb/s | |
2089 | * | |
2090 | * Returns the maximum possible bandwidth this tunnel can use if not limited | |
2091 | * by other bandwidth clients. If the tunnel does not support this | |
2092 | * returns %-EOPNOTSUPP. | |
2093 | */ | |
6ce35635 MW |
2094 | int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, |
2095 | int *max_down) | |
2096 | { | |
2097 | if (!tb_tunnel_is_active(tunnel)) | |
2098 | return -EINVAL; | |
2099 | ||
2100 | if (tunnel->maximum_bandwidth) | |
2101 | return tunnel->maximum_bandwidth(tunnel, max_up, max_down); | |
2102 | return -EOPNOTSUPP; | |
2103 | } | |
2104 | ||
2105 | /** | |
2106 | * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel | |
2107 | * @tunnel: Tunnel to check | |
2108 | * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here | |
2109 | * @allocated_down: Currently allocated downstream bandwidth in Mb/s is | |
2110 | * stored here | |
2111 | * | |
2112 | * Returns the bandwidth allocated for the tunnel. This may be higher | |
2113 | * than what the tunnel actually consumes. | |
2114 | */ | |
2115 | int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, | |
2116 | int *allocated_down) | |
2117 | { | |
2118 | if (!tb_tunnel_is_active(tunnel)) | |
2119 | return -EINVAL; | |
2120 | ||
2121 | if (tunnel->allocated_bandwidth) | |
2122 | return tunnel->allocated_bandwidth(tunnel, allocated_up, | |
2123 | allocated_down); | |
2124 | return -EOPNOTSUPP; | |
2125 | } | |
2126 | ||
2127 | /** | |
2128 | * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation | |
2129 | * @tunnel: Tunnel whose bandwidth allocation to change | |
2130 | * @alloc_up: New upstream bandwidth in Mb/s | |
2131 | * @alloc_down: New downstream bandwidth in Mb/s | |
2132 | * | |
2133 | * Tries to change the tunnel bandwidth allocation. If it succeeds, returns | |
2134 | * %0 and updates @alloc_up and @alloc_down to what was actually allocated | |
2135 | * (it may not be the same as passed originally). Returns negative errno | |
2136 | * in case of failure. | |
2137 | */ | |
2138 | int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, | |
2139 | int *alloc_down) | |
2140 | { | |
2141 | if (!tb_tunnel_is_active(tunnel)) | |
2142 | return -EINVAL; | |
2143 | ||
2144 | if (tunnel->alloc_bandwidth) | |
2145 | return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down); | |
2146 | ||
2147 | return -EOPNOTSUPP; | |
2148 | } | |
2149 | ||
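/*
 * Illustrative sketch of how the three helpers above compose (editor's
 * example; assumes a tunnel type that implements the optional callbacks,
 * such as DP with bandwidth allocation mode): query the ceiling, then
 * request half of it and read back what was actually granted.
 */
static int example_shrink_allocation(struct tb_tunnel *tunnel)
{
	int max_up, max_down, alloc_up, alloc_down;
	int ret;

	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
	if (ret)
		return ret;

	alloc_up = max_up / 2;
	alloc_down = max_down / 2;

	ret = tb_tunnel_alloc_bandwidth(tunnel, &alloc_up, &alloc_down);
	if (ret)
		return ret;

	/* The tunnel may have rounded the request up or down */
	tb_tunnel_dbg(tunnel, "granted %d/%d Mb/s\n", alloc_up, alloc_down);
	return 0;
}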
a11b88ad MW |
2150 | /** |
2151 | * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel | |
2152 | * @tunnel: Tunnel to check | |
7c0ee8fd MW |
2153 | * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port. |
2154 | * Can be %NULL. | |
2155 | * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port. | |
2156 | * Can be %NULL. | |
a11b88ad | 2157 | * |
7c0ee8fd MW |
2158 | * Stores the amount of isochronous bandwidth @tunnel consumes in |
2159 | * @consumed_up and @consumed_down. In case of success returns %0, | |
2160 | * negative errno otherwise. | |
a11b88ad | 2161 | */ |
7c0ee8fd MW |
2162 | int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, |
2163 | int *consumed_down) | |
a11b88ad | 2164 | { |
7c0ee8fd MW |
2165 | int up_bw = 0, down_bw = 0; |
2166 | ||
a11b88ad | 2167 | if (!tb_tunnel_is_active(tunnel)) |
7c0ee8fd | 2168 | goto out; |
a11b88ad MW |
2169 | |
2170 | if (tunnel->consumed_bandwidth) { | |
7c0ee8fd | 2171 | int ret; |
a11b88ad | 2172 | |
7c0ee8fd MW |
2173 | ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw); |
2174 | if (ret) | |
2175 | return ret; | |
2176 | ||
2177 | tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, | |
2178 | down_bw); | |
a11b88ad MW |
2179 | } |
2180 | ||
7c0ee8fd MW |
2181 | out: |
2182 | if (consumed_up) | |
2183 | *consumed_up = up_bw; | |
2184 | if (consumed_down) | |
2185 | *consumed_down = down_bw; | |
2186 | ||
a11b88ad MW |
2187 | return 0; |
2188 | } | |
0bd680cd MW |
2189 | |
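/*
 * Illustrative sketch (editor's example): total isochronous bandwidth
 * consumed by all tunnels on a list, similar in spirit to what the
 * software connection manager does when estimating available bandwidth.
 */
static int example_total_consumed(struct list_head *tunnels,
				  int *total_up, int *total_down)
{
	struct tb_tunnel *tunnel;

	*total_up = *total_down = 0;

	list_for_each_entry(tunnel, tunnels, list) {
		int up, down, ret;

		ret = tb_tunnel_consumed_bandwidth(tunnel, &up, &down);
		if (ret)
			return ret;

		*total_up += up;
		*total_down += down;
	}

	return 0;
}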
2190 | /** | |
2191 | * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth | |
2192 | * @tunnel: Tunnel whose unused bandwidth to release | |
2193 | * | |
2194 | * If the tunnel supports dynamic bandwidth management (USB3 tunnels at | |
2195 | * the moment) this function makes it release all the unused bandwidth. | |
2196 | * | |
2197 | * Returns %0 in case of success and negative errno otherwise. | |
2198 | */ | |
2199 | int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel) | |
2200 | { | |
2201 | if (!tb_tunnel_is_active(tunnel)) | |
2202 | return 0; | |
2203 | ||
2204 | if (tunnel->release_unused_bandwidth) { | |
2205 | int ret; | |
2206 | ||
2207 | ret = tunnel->release_unused_bandwidth(tunnel); | |
2208 | if (ret) | |
2209 | return ret; | |
2210 | } | |
2211 | ||
2212 | return 0; | |
2213 | } | |
2214 | ||
2215 | /** | |
2216 | * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth | |
2217 | * @tunnel: Tunnel reclaiming available bandwidth | |
2218 | * @available_up: Available upstream bandwidth (in Mb/s) | |
2219 | * @available_down: Available downstream bandwidth (in Mb/s) | |
2220 | * | |
2221 | * Reclaims bandwidth from @available_up and @available_down and updates | |
2222 | * the variables accordingly (e.g. decreases both according to what was | |
2223 | * reclaimed by the tunnel). If nothing was reclaimed the values are | |
2224 | * kept as is. | |
2225 | */ | |
2226 | void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel, | |
2227 | int *available_up, | |
2228 | int *available_down) | |
2229 | { | |
2230 | if (!tb_tunnel_is_active(tunnel)) | |
2231 | return; | |
2232 | ||
2233 | if (tunnel->reclaim_available_bandwidth) | |
2234 | tunnel->reclaim_available_bandwidth(tunnel, available_up, | |
2235 | available_down); | |
2236 | } |
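
/*
 * Illustrative sketch of the rebalancing sequence the two helpers above
 * are meant for (editor's example): every tunnel first gives back what
 * it does not use, then each gets a chance to grow into whatever
 * bandwidth is still available.
 */
static void example_rebalance(struct list_head *tunnels, int available_up,
			      int available_down)
{
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, tunnels, list)
		tb_tunnel_release_unused_bandwidth(tunnel);

	list_for_each_entry(tunnel, tunnels, list)
		tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
						      &available_down);
}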