/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#ifndef TB_H_
#define TB_H_

#include <linux/nvmem-provider.h>
#include <linux/pci.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/bitfield.h>

#include "tb_regs.h"
#include "ctl.h"
#include "dma_port.h"

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K
#define NVM_DATA_DWORDS		16

/* Keep link controller awake during update */
#define QUIRK_FORCE_POWER_LINK_CONTROLLER	BIT(0)
/* Disable CLx if not supported */
#define QUIRK_NO_CLX				BIT(1)

/**
 * struct tb_nvm - Structure holding NVM information
 * @dev: Owner of the NVM
 * @major: Major version number of the active NVM portion
 * @minor: Minor version number of the active NVM portion
 * @id: Identifier used with both NVM portions
 * @active: Active portion NVMem device
 * @active_size: Size in bytes of the active NVM
 * @non_active: Non-active portion NVMem device
 * @buf: Buffer where the NVM image is stored before it is written to
 *	 the actual NVM flash device
 * @buf_data_start: Where the actual image starts after skipping
 *		    possible headers
 * @buf_data_size: Number of bytes actually consumed by the new NVM
 *		   image
 * @authenticating: The device is authenticating the new NVM
 * @flushed: The image has been flushed to the storage area
 * @vops: Router vendor specific NVM operations (optional)
 *
 * The user of this structure needs to handle serialization of possible
 * concurrent access.
 */
struct tb_nvm {
	struct device *dev;
	u32 major;
	u32 minor;
	int id;
	struct nvmem_device *active;
	size_t active_size;
	struct nvmem_device *non_active;
	void *buf;
	void *buf_data_start;
	size_t buf_data_size;
	bool authenticating;
	bool flushed;
	const struct tb_nvm_vendor_ops *vops;
};

enum tb_nvm_write_ops {
	WRITE_AND_AUTHENTICATE = 1,
	WRITE_ONLY = 2,
	AUTHENTICATE_ONLY = 3,
};

#define TB_SWITCH_KEY_SIZE		32
#define TB_SWITCH_MAX_DEPTH		6
#define USB4_SWITCH_MAX_DEPTH		5

/**
 * enum tb_switch_tmu_rate - TMU refresh rate
 * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
 * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
 *			     transmission of the Delay Request TSNOS
 *			     (Time Sync Notification Ordered Set) on a Link
 * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
 *			       transmission of the Delay Request TSNOS on
 *			       a Link
 */
enum tb_switch_tmu_rate {
	TB_SWITCH_TMU_RATE_OFF = 0,
	TB_SWITCH_TMU_RATE_HIFI = 16,
	TB_SWITCH_TMU_RATE_NORMAL = 1000,
};

/**
 * struct tb_switch_tmu - Structure holding switch TMU configuration
 * @cap: Offset to the TMU capability (%0 if not found)
 * @has_ucap: Does the switch support uni-directional mode
 * @rate: TMU refresh rate related to upstream switch. In case of root
 *	  switch this holds the domain rate. Reflects the HW setting.
 * @unidirectional: Is the TMU in uni-directional or bi-directional mode
 *		    related to upstream switch. Don't care for root switch.
 *		    Reflects the HW setting.
 * @unidirectional_request: New TMU mode (uni-directional or bi-directional)
 *			    that is requested to be set. Related to upstream
 *			    switch. Don't care for root switch.
 * @rate_request: New TMU refresh rate related to upstream switch that is
 *		  requested to be set. In case of root switch, this holds
 *		  the new domain rate that is requested to be set.
 */
struct tb_switch_tmu {
	int cap;
	bool has_ucap;
	enum tb_switch_tmu_rate rate;
	bool unidirectional;
	bool unidirectional_request;
	enum tb_switch_tmu_rate rate_request;
};

enum tb_clx {
	TB_CLX_DISABLE,
	/* CL0s and CL1 are enabled and supported together */
	TB_CL1 = BIT(0),
	TB_CL2 = BIT(1),
};

/**
 * struct tb_switch - a thunderbolt switch
 * @dev: Device for the switch
 * @config: Switch configuration
 * @ports: Ports in this switch
 * @dma_port: If the switch has a port supporting DMA configuration based
 *	      mailbox this will hold the pointer to that (%NULL
 *	      otherwise). If set it also means the switch has
 *	      upgradeable NVM.
 * @tmu: The switch TMU configuration
 * @tb: Pointer to the domain the switch belongs to
 * @uid: Unique ID of the switch
 * @uuid: UUID of the switch (or %NULL if not supported)
 * @vendor: Vendor ID of the switch
 * @device: Device ID of the switch
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the link (1 or 2)
 * @link_usb4: Upstream link is USB4
 * @generation: Switch Thunderbolt generation
 * @cap_plug_events: Offset to the plug events capability (%0 if not found)
 * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found)
 * @cap_lc: Offset to the link controller capability (%0 if not found)
 * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found)
 * @is_unplugged: The switch is going away
 * @drom: DROM of the switch (%NULL if not found)
 * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
 * @no_nvm_upgrade: Prevent NVM upgrade of this switch
 * @safe_mode: The switch is in safe-mode
 * @boot: Whether the switch was already authorized on boot or not
 * @rpm: The switch supports runtime PM
 * @authorized: Whether the switch is authorized by user or policy
 * @security_level: Switch supported security level
 * @debugfs_dir: Pointer to the debugfs structure
 * @key: Contains the key used to challenge the device or %NULL if not
 *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
 * @connection_id: Connection ID used with ICM messaging
 * @connection_key: Connection key used with ICM messaging
 * @link: Root switch link this switch is connected to (ICM only)
 * @depth: Depth in the chain this switch is connected at (ICM only)
 * @rpm_complete: Completion used to wait for runtime resume to
 *		  complete (ICM only)
 * @quirks: Quirks used for this Thunderbolt switch
 * @credit_allocation: Are the below buffer allocation parameters valid
 * @max_usb3_credits: Router preferred number of buffers for USB 3.x
 * @min_dp_aux_credits: Router preferred minimum number of buffers for DP AUX
 * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN
 * @max_pcie_credits: Router preferred number of buffers for PCIe
 * @max_dma_credits: Router preferred number of buffers for DMA/P2P
 * @clx: CLx state on the upstream link of the router
 *
 * When the switch is being added to or removed from the domain (other
 * switches) you need to have the domain lock held.
 *
 * In USB4 terminology this structure represents a router.
 */
struct tb_switch {
	struct device dev;
	struct tb_regs_switch_header config;
	struct tb_port *ports;
	struct tb_dma_port *dma_port;
	struct tb_switch_tmu tmu;
	struct tb *tb;
	u64 uid;
	uuid_t *uuid;
	u16 vendor;
	u16 device;
	const char *vendor_name;
	const char *device_name;
	unsigned int link_speed;
	unsigned int link_width;
	bool link_usb4;
	unsigned int generation;
	int cap_plug_events;
	int cap_vsec_tmu;
	int cap_lc;
	int cap_lp;
	bool is_unplugged;
	u8 *drom;
	struct tb_nvm *nvm;
	bool no_nvm_upgrade;
	bool safe_mode;
	bool boot;
	bool rpm;
	unsigned int authorized;
	enum tb_security_level security_level;
	struct dentry *debugfs_dir;
	u8 *key;
	u8 connection_id;
	u8 connection_key;
	u8 link;
	u8 depth;
	struct completion rpm_complete;
	unsigned long quirks;
	bool credit_allocation;
	unsigned int max_usb3_credits;
	unsigned int min_dp_aux_credits;
	unsigned int min_dp_main_credits;
	unsigned int max_pcie_credits;
	unsigned int max_dma_credits;
	enum tb_clx clx;
};

/**
 * struct tb_bandwidth_group - Bandwidth management group
 * @tb: Pointer to the domain the group belongs to
 * @index: Index of the group (aka Group_ID). Valid values %1-%7
 * @ports: DP IN adapters belonging to this group are linked here
 *
 * Any tunnel that requires isochronous bandwidth (that's DP for now) is
 * attached to a bandwidth group. All tunnels going through the same
 * USB4 links share the same group and can dynamically distribute the
 * bandwidth within the group.
 */
struct tb_bandwidth_group {
	struct tb *tb;
	int index;
	struct list_head ports;
};

/**
 * struct tb_port - a thunderbolt port, part of a tb_switch
 * @config: Cached port configuration read from registers
 * @sw: Switch the port belongs to
 * @remote: Remote port (%NULL if not connected)
 * @xdomain: Remote host (%NULL if not connected)
 * @cap_phy: Offset, zero if not found
 * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
 * @cap_adap: Offset of the adapter specific capability (%0 if not present)
 * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
 * @usb4: Pointer to the USB4 port structure (only if @cap_usb4 is != %0)
 * @port: Port number on switch
 * @disabled: Disabled by eeprom or enabled but not implemented
 * @bonded: true if the port is bonded (two lanes combined as one)
 * @dual_link_port: If the switch is connected using two ports, points
 *		    to the other port.
 * @link_nr: Is this primary or secondary port on the dual_link.
 * @in_hopids: Currently allocated input HopIDs
 * @out_hopids: Currently allocated output HopIDs
 * @list: Used to link ports to DP resources list
 * @total_credits: Total number of buffers available for this port
 * @ctl_credits: Buffers reserved for control path
 * @dma_credits: Number of credits allocated for DMA tunneling for all
 *		 DMA paths through this port.
 * @group: Bandwidth allocation group the adapter is assigned to. Only
 *	   used for DP IN adapters for now.
 * @group_list: The adapter is linked to the group's list of ports through this
 * @max_bw: Maximum possible bandwidth through this adapter if set to
 *	    non-zero.
 *
 * In USB4 terminology this structure represents an adapter (protocol or
 * lane adapter).
 */
struct tb_port {
	struct tb_regs_port_header config;
	struct tb_switch *sw;
	struct tb_port *remote;
	struct tb_xdomain *xdomain;
	int cap_phy;
	int cap_tmu;
	int cap_adap;
	int cap_usb4;
	struct usb4_port *usb4;
	u8 port;
	bool disabled;
	bool bonded;
	struct tb_port *dual_link_port;
	u8 link_nr:1;
	struct ida in_hopids;
	struct ida out_hopids;
	struct list_head list;
	unsigned int total_credits;
	unsigned int ctl_credits;
	unsigned int dma_credits;
	struct tb_bandwidth_group *group;
	struct list_head group_list;
	unsigned int max_bw;
};

/**
 * struct usb4_port - USB4 port device
 * @dev: Device for the port
 * @port: Pointer to the lane 0 adapter
 * @can_offline: Does the port have necessary platform support to move
 *		 it into offline mode and back
 * @offline: The port is currently in offline mode
 * @margining: Pointer to margining structure if enabled
 */
struct usb4_port {
	struct device dev;
	struct tb_port *port;
	bool can_offline;
	bool offline;
#ifdef CONFIG_USB4_DEBUGFS_MARGINING
	struct tb_margining *margining;
#endif
};

/**
 * struct tb_retimer - Thunderbolt retimer
 * @dev: Device for the retimer
 * @tb: Pointer to the domain the retimer belongs to
 * @index: Retimer index facing the router USB4 port
 * @vendor: Vendor ID of the retimer
 * @device: Device ID of the retimer
 * @port: Pointer to the lane 0 adapter
 * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
 * @no_nvm_upgrade: Prevent NVM upgrade of this retimer
 * @auth_status: Status of last NVM authentication
 */
struct tb_retimer {
	struct device dev;
	struct tb *tb;
	u8 index;
	u32 vendor;
	u32 device;
	struct tb_port *port;
	struct tb_nvm *nvm;
	bool no_nvm_upgrade;
	u32 auth_status;
};

/**
 * struct tb_path_hop - routing information for a tb_path
 * @in_port: Ingress port of a switch
 * @out_port: Egress port of a switch where the packet is routed out
 *	      (must be on the same switch as @in_port)
 * @in_hop_index: HopID where the path configuration entry is placed in
 *		  the path config space of @in_port.
 * @in_counter_index: Used counter index (not used in the driver
 *		      currently, %-1 to disable)
 * @next_hop_index: HopID of the packet when it is routed out from @out_port
 * @initial_credits: Number of initial flow control credits allocated for
 *		     the path
 * @nfc_credits: Number of non-flow controlled buffers allocated for the
 *		 @in_port.
 *
 * Hop configuration is always done on the IN port of a switch.
 * in_port and out_port have to be on the same switch. Packets arriving on
 * in_port with "hop" = in_hop_index will get routed out through out_port.
 * The next hop to take (on out_port->remote) is determined by
 * next_hop_index. When routing a packet to another switch (out->remote is
 * set) the @next_hop_index must match the @in_hop_index of that next
 * hop to make routing possible.
 *
 * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
 * port.
 */
struct tb_path_hop {
	struct tb_port *in_port;
	struct tb_port *out_port;
	int in_hop_index;
	int in_counter_index;
	int next_hop_index;
	unsigned int initial_credits;
	unsigned int nfc_credits;
};

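/*
 * Illustrative sketch (not part of the API): for a two-hop path the
 * @next_hop_index programmed on one switch has to match the
 * @in_hop_index used on the next one, e.g.:
 *
 *	hops[0].in_hop_index = 8;
 *	hops[0].next_hop_index = 9;
 *	hops[1].in_hop_index = 9;	// must match hops[0].next_hop_index
 *
 * so that packets leaving hops[0].out_port arrive at the next switch
 * with HopID 9 and hit the entry installed for hops[1].
 */
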
/**
 * enum tb_path_port - path options mask
 * @TB_PATH_NONE: Do not activate on any hop on path
 * @TB_PATH_SOURCE: Activate on the first hop (out of src)
 * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last)
 * @TB_PATH_DESTINATION: Activate on the last hop (into dst)
 * @TB_PATH_ALL: Activate on all hops on the path
 */
enum tb_path_port {
	TB_PATH_NONE = 0,
	TB_PATH_SOURCE = 1,
	TB_PATH_INTERNAL = 2,
	TB_PATH_DESTINATION = 4,
	TB_PATH_ALL = 7,
};

/**
 * struct tb_path - a unidirectional path between two ports
 * @tb: Pointer to the domain structure
 * @name: Name of the path (used for debugging)
 * @ingress_shared_buffer: Shared buffering used for ingress ports on the path
 * @egress_shared_buffer: Shared buffering used for egress ports on the path
 * @ingress_fc_enable: Flow control for ingress ports on the path
 * @egress_fc_enable: Flow control for egress ports on the path
 * @priority: Priority group of the path
 * @weight: Weight of the path inside the priority group
 * @drop_packages: Drop packages from queue tail or head
 * @activated: Is the path active
 * @clear_fc: Clear all flow control from the path config space entries
 *	      when deactivating this path
 * @hops: Path hops
 * @path_length: How many hops the path uses
 * @alloc_hopid: Does this path consume port HopID
 *
 * A path consists of a number of hops (see &struct tb_path_hop). To
 * establish a PCIe tunnel two paths have to be created between the two
 * PCIe ports.
 */
struct tb_path {
	struct tb *tb;
	const char *name;
	enum tb_path_port ingress_shared_buffer;
	enum tb_path_port egress_shared_buffer;
	enum tb_path_port ingress_fc_enable;
	enum tb_path_port egress_fc_enable;

	unsigned int priority:3;
	int weight:4;
	bool drop_packages;
	bool activated;
	bool clear_fc;
	struct tb_path_hop *hops;
	int path_length;
	bool alloc_hopid;
};

/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
#define TB_PATH_MIN_HOPID	8
/*
 * Support paths from the farthest (depth 6) router to the host and back
 * to the same level (not necessarily to the same router).
 */
#define TB_PATH_MAX_HOPS	(7 * 2)

/* Possible wake types */
#define TB_WAKE_ON_CONNECT	BIT(0)
#define TB_WAKE_ON_DISCONNECT	BIT(1)
#define TB_WAKE_ON_USB4		BIT(2)
#define TB_WAKE_ON_USB3		BIT(3)
#define TB_WAKE_ON_PCIE		BIT(4)
#define TB_WAKE_ON_DP		BIT(5)

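/*
 * Illustrative sketch: the wake types form a bitmask, so several wake
 * sources can be armed at once when putting a router to sleep, e.g.:
 *
 *	usb4_switch_set_wake(sw, TB_WAKE_ON_USB4 | TB_WAKE_ON_PCIE);
 */
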
/**
 * struct tb_cm_ops - Connection manager specific operations vector
 * @driver_ready: Called right after control channel is started. Used by
 *		  ICM to send driver ready message to the firmware.
 * @start: Starts the domain
 * @stop: Stops the domain
 * @suspend_noirq: Connection manager specific suspend_noirq
 * @resume_noirq: Connection manager specific resume_noirq
 * @suspend: Connection manager specific suspend
 * @freeze_noirq: Connection manager specific freeze_noirq
 * @thaw_noirq: Connection manager specific thaw_noirq
 * @complete: Connection manager specific complete
 * @runtime_suspend: Connection manager specific runtime_suspend
 * @runtime_resume: Connection manager specific runtime_resume
 * @runtime_suspend_switch: Runtime suspend a switch
 * @runtime_resume_switch: Runtime resume a switch
 * @handle_event: Handle thunderbolt event
 * @get_boot_acl: Get boot ACL list
 * @set_boot_acl: Set boot ACL list
 * @disapprove_switch: Disapprove switch (disconnect PCIe tunnel)
 * @approve_switch: Approve switch
 * @add_switch_key: Add key to switch
 * @challenge_switch_key: Challenge switch using key
 * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
 * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
 * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
 * @usb4_switch_op: Optional proxy for USB4 router operations. If set
 *		    this will be called whenever USB4 router operation is
 *		    performed. If this returns %-EOPNOTSUPP then the
 *		    native USB4 router operation is called.
 * @usb4_switch_nvm_authenticate_status: Optional callback that the CM
 *					 implementation can use to return
 *					 status of the USB4 NVM_AUTH
 *					 router operation.
 */
struct tb_cm_ops {
	int (*driver_ready)(struct tb *tb);
	int (*start)(struct tb *tb);
	void (*stop)(struct tb *tb);
	int (*suspend_noirq)(struct tb *tb);
	int (*resume_noirq)(struct tb *tb);
	int (*suspend)(struct tb *tb);
	int (*freeze_noirq)(struct tb *tb);
	int (*thaw_noirq)(struct tb *tb);
	void (*complete)(struct tb *tb);
	int (*runtime_suspend)(struct tb *tb);
	int (*runtime_resume)(struct tb *tb);
	int (*runtime_suspend_switch)(struct tb_switch *sw);
	int (*runtime_resume_switch)(struct tb_switch *sw);
	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
			     const void *buf, size_t size);
	int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
	int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
	int (*disapprove_switch)(struct tb *tb, struct tb_switch *sw);
	int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
				    const u8 *challenge, u8 *response);
	int (*disconnect_pcie_paths)(struct tb *tb);
	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
				     int transmit_path, int transmit_ring,
				     int receive_path, int receive_ring);
	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
					int transmit_path, int transmit_ring,
					int receive_path, int receive_ring);
	int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata,
			      u8 *status, const void *tx_data, size_t tx_data_len,
			      void *rx_data, size_t rx_data_len);
	int (*usb4_switch_nvm_authenticate_status)(struct tb_switch *sw,
						   u32 *status);
};

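/*
 * Minimal sketch of a connection manager wiring up the ops vector
 * (illustrative only; the "my_cm_start"/"my_cm_stop" hooks are made up
 * for this example):
 *
 *	static int my_cm_start(struct tb *tb) { return 0; }
 *	static void my_cm_stop(struct tb *tb) { }
 *
 *	static const struct tb_cm_ops my_cm_ops = {
 *		.start = my_cm_start,
 *		.stop = my_cm_stop,
 *	};
 */
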
static inline void *tb_priv(struct tb *tb)
{
	return (void *)tb->privdata;
}

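/*
 * Illustrative sketch: the private area is sized by the privsize
 * argument of tb_domain_alloc() (declared below) and retrieved with
 * tb_priv(). "struct my_cm" and "timeout" are made up here:
 *
 *	struct tb *tb = tb_domain_alloc(nhi, timeout, sizeof(struct my_cm));
 *	struct my_cm *cm = tb_priv(tb);
 */
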
#define TB_AUTOSUSPEND_DELAY		15000 /* ms */

/* helper functions & macros */

/**
 * tb_upstream_port() - return the upstream port of a switch
 *
 * Every switch has an upstream port (for the root switch it is the NHI).
 *
 * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
 * non root switches (on the NHI port remote is always NULL).
 *
 * Return: Returns the upstream port of the switch.
 */
static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
{
	return &sw->ports[sw->config.upstream_port_number];
}

/**
 * tb_is_upstream_port() - Is the port upstream facing
 * @port: Port to check
 *
 * Returns true if @port is upstream facing port. In case of dual link
 * ports both return true.
 */
static inline bool tb_is_upstream_port(const struct tb_port *port)
{
	const struct tb_port *upstream_port = tb_upstream_port(port->sw);
	return port == upstream_port || port->dual_link_port == upstream_port;
}

static inline u64 tb_route(const struct tb_switch *sw)
{
	return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
}

static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
{
	u8 port;

	port = route >> (sw->config.depth * 8);
	if (WARN_ON(port > sw->config.max_port_number))
		return NULL;
	return &sw->ports[port];
}

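/*
 * Illustrative sketch: a route string packs one downstream port number
 * per byte, indexed by depth (see also tb_downstream_route() below).
 * A router reached from the host through root port 3 and then port 5
 * of the depth 1 router has route 0x0503; tb_port_at(0x0503, sw) on
 * that depth 1 router returns &sw->ports[5].
 */
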
/**
 * tb_port_has_remote() - Does the port have switch connected downstream
 * @port: Port to check
 *
 * Returns true only when the port is primary port and has remote set.
 */
static inline bool tb_port_has_remote(const struct tb_port *port)
{
	if (tb_is_upstream_port(port))
		return false;
	if (!port->remote)
		return false;
	if (port->dual_link_port && port->link_nr)
		return false;

	return true;
}

static inline bool tb_port_is_null(const struct tb_port *port)
{
	return port && port->port && port->config.type == TB_TYPE_PORT;
}

static inline bool tb_port_is_nhi(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_NHI;
}

static inline bool tb_port_is_pcie_down(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_PCIE_DOWN;
}

static inline bool tb_port_is_pcie_up(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_PCIE_UP;
}

static inline bool tb_port_is_dpin(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_DP_HDMI_IN;
}

static inline bool tb_port_is_dpout(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
}

static inline bool tb_port_is_usb3_down(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_USB3_DOWN;
}

static inline bool tb_port_is_usb3_up(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_USB3_UP;
}

static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
			     enum tb_cfg_space space, u32 offset, u32 length)
{
	if (sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_read(sw->tb->ctl,
			   buffer,
			   tb_route(sw),
			   0,
			   space,
			   offset,
			   length);
}

static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
			      enum tb_cfg_space space, u32 offset, u32 length)
{
	if (sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_write(sw->tb->ctl,
			    buffer,
			    tb_route(sw),
			    0,
			    space,
			    offset,
			    length);
}

static inline int tb_port_read(struct tb_port *port, void *buffer,
			       enum tb_cfg_space space, u32 offset, u32 length)
{
	if (port->sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_read(port->sw->tb->ctl,
			   buffer,
			   tb_route(port->sw),
			   port->port,
			   space,
			   offset,
			   length);
}

static inline int tb_port_write(struct tb_port *port, const void *buffer,
				enum tb_cfg_space space, u32 offset, u32 length)
{
	if (port->sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_write(port->sw->tb->ctl,
			    buffer,
			    tb_route(port->sw),
			    port->port,
			    space,
			    offset,
			    length);
}

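/*
 * Illustrative sketch: reading one dword from the port config space
 * (length is in dwords; "val" and "offset" are made up here):
 *
 *	u32 val;
 *	int ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
 */
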
#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)

#define __TB_SW_PRINT(level, sw, fmt, arg...)			\
	do {							\
		const struct tb_switch *__sw = (sw);		\
		level(__sw->tb, "%llx: " fmt,			\
		      tb_route(__sw), ## arg);			\
	} while (0)
#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)

#define __TB_PORT_PRINT(level, _port, fmt, arg...)			\
	do {								\
		const struct tb_port *__port = (_port);			\
		level(__port->sw->tb, "%llx:%u: " fmt,			\
		      tb_route(__port->sw), __port->port, ## arg);	\
	} while (0)
#define tb_port_WARN(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
#define tb_port_warn(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
#define tb_port_info(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
#define tb_port_dbg(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)

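/*
 * Illustrative sketch: the switch/port variants prefix messages with
 * the route string (and port number); the message texts are made up:
 *
 *	tb_sw_dbg(sw, "resume finished\n");
 *	tb_port_dbg(port, "DP resource allocated\n");
 */
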
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);

extern struct device_type tb_domain_type;
extern struct device_type tb_retimer_type;
extern struct device_type tb_switch_type;
extern struct device_type usb4_port_device_type;

int tb_domain_init(void);
void tb_domain_exit(void);
int tb_xdomain_init(void);
void tb_xdomain_exit(void);

struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
int tb_domain_add(struct tb *tb);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
int tb_domain_suspend(struct tb *tb);
int tb_domain_freeze_noirq(struct tb *tb);
int tb_domain_thaw_noirq(struct tb *tb);
void tb_domain_complete(struct tb *tb);
int tb_domain_runtime_suspend(struct tb *tb);
int tb_domain_runtime_resume(struct tb *tb);
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_disconnect_pcie_paths(struct tb *tb);
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring);
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring);
int tb_domain_disconnect_all_paths(struct tb *tb);

static inline struct tb *tb_domain_get(struct tb *tb)
{
	if (tb)
		get_device(&tb->dev);
	return tb;
}

static inline void tb_domain_put(struct tb *tb)
{
	put_device(&tb->dev);
}

struct tb_nvm *tb_nvm_alloc(struct device *dev);
int tb_nvm_read_version(struct tb_nvm *nvm);
int tb_nvm_validate(struct tb_nvm *nvm);
int tb_nvm_write_headers(struct tb_nvm *nvm);
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read);
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
		     size_t bytes);
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write);
void tb_nvm_free(struct tb_nvm *nvm);
void tb_nvm_exit(void);

typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
typedef int (*write_block_fn)(void *, unsigned int, const void *, size_t);

int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
		     unsigned int retries, read_block_fn read_block,
		     void *read_block_data);
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
		      unsigned int retries, write_block_fn write_next_block,
		      void *write_block_data);

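/*
 * Minimal sketch of a read_block_fn callback ("my_read_block" and the
 * memcpy-backed store are made up; the dword-based addressing is an
 * assumption about how these callbacks are invoked):
 *
 *	static int my_read_block(void *data, unsigned int dwaddress,
 *				 void *buf, size_t dwords)
 *	{
 *		const u8 *image = data;	// hypothetical backing store
 *
 *		memcpy(buf, image + dwaddress * 4, dwords * 4);
 *		return 0;
 *	}
 */
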
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size);
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route);
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
					    struct device *parent, u64 route);
int tb_switch_configure(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb_switch *sw);
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
					       u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);

/**
 * tb_switch_for_each_port() - Iterate over each switch port
 * @sw: Switch whose ports to iterate
 * @p: Port used as iterator
 *
 * Iterates over each switch port skipping the control port (port %0).
 */
#define tb_switch_for_each_port(sw, p)					\
	for ((p) = &(sw)->ports[1];					\
	     (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)

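/*
 * Illustrative sketch: counting the lane adapters of a switch with the
 * iterator above ("count" is made up here):
 *
 *	struct tb_port *port;
 *	int count = 0;
 *
 *	tb_switch_for_each_port(sw, port) {
 *		if (tb_port_is_null(port))
 *			count++;
 *	}
 */
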
static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
{
	if (sw)
		get_device(&sw->dev);
	return sw;
}

static inline void tb_switch_put(struct tb_switch *sw)
{
	put_device(&sw->dev);
}

static inline bool tb_is_switch(const struct device *dev)
{
	return dev->type == &tb_switch_type;
}

static inline struct tb_switch *tb_to_switch(const struct device *dev)
{
	if (tb_is_switch(dev))
		return container_of(dev, struct tb_switch, dev);
	return NULL;
}

static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
{
	return tb_to_switch(sw->dev.parent);
}

static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
	return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
	       sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
}

static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
{
	return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
	       sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
}

static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
{
	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
			return true;
		}
	}
	return false;
}

static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
{
	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
			return true;
		}
	}
	return false;
}

static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
{
	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
			return true;
		}
	}
	return false;
}

static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
{
	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
			return true;
		}
	}
	return false;
}

static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
{
	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_TGL_NHI0:
		case PCI_DEVICE_ID_INTEL_TGL_NHI1:
		case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
		case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
			return true;
		}
	}
	return false;
}

/**
 * tb_switch_is_usb4() - Is the switch USB4 compliant
 * @sw: Switch to check
 *
 * Returns true if the @sw is USB4 compliant router, false otherwise.
 */
static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
{
	return sw->config.thunderbolt_version == USB4_VERSION_1_0;
}

/**
 * tb_switch_is_icm() - Is the switch handled by ICM firmware
 * @sw: Switch to check
 *
 * In case there is a need to differentiate whether ICM firmware or SW CM
 * is handling @sw this function can be called. It is valid to call this
 * after tb_switch_alloc() and tb_switch_configure() has been called
 * (latter only for SW CM case).
 */
static inline bool tb_switch_is_icm(const struct tb_switch *sw)
{
	return !sw->config.enabled;
}

int tb_switch_lane_bonding_enable(struct tb_switch *sw);
void tb_switch_lane_bonding_disable(struct tb_switch *sw);
int tb_switch_configure_link(struct tb_switch *sw);
void tb_switch_unconfigure_link(struct tb_switch *sw);

bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);

int tb_switch_tmu_init(struct tb_switch *sw);
int tb_switch_tmu_post_time(struct tb_switch *sw);
int tb_switch_tmu_disable(struct tb_switch *sw);
int tb_switch_tmu_enable(struct tb_switch *sw);
void tb_switch_tmu_configure(struct tb_switch *sw,
			     enum tb_switch_tmu_rate rate,
			     bool unidirectional);
void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
				    enum tb_switch_tmu_rate rate);
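
/*
 * Illustrative sketch (assumed flow, matching the declarations above):
 * request a mode with tb_switch_tmu_configure() and then program the
 * hardware with tb_switch_tmu_enable():
 *
 *	tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, true);
 *	ret = tb_switch_tmu_enable(sw);
 */
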
/**
 * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled
 * @sw: Router whose TMU mode to check
 * @unidirectional: If uni-directional (bi-directional otherwise)
 *
 * Return true if hardware TMU configuration matches the one passed in
 * as parameter. That is HiFi/Normal and either uni-directional or
 * bi-directional.
 */
static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw,
					    bool unidirectional)
{
	return sw->tmu.rate == sw->tmu.rate_request &&
	       sw->tmu.unidirectional == unidirectional;
}

static inline const char *tb_switch_clx_name(enum tb_clx clx)
{
	switch (clx) {
	/* CL0s and CL1 are enabled and supported together */
	case TB_CL1:
		return "CL0s/CL1";
	default:
		return "unknown";
	}
}

int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx);
int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx);

/**
 * tb_switch_is_clx_enabled() - Checks if the CLx is enabled
 * @sw: Router to check for the CLx
 * @clx: The CLx state to check for
 *
 * Checks if the specified CLx is enabled on the router upstream link.
 * Not applicable for a host router.
 */
static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
					    enum tb_clx clx)
{
	return sw->clx == clx;
}

/**
 * tb_switch_is_clx_supported() - Is CLx supported on this type of router
 * @sw: The router to check CLx support for
 */
static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
{
	if (sw->quirks & QUIRK_NO_CLX)
		return false;

	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

int tb_switch_mask_clx_objections(struct tb_switch *sw);

int tb_switch_pcie_l1_enable(struct tb_switch *sw);

int tb_switch_xhci_connect(struct tb_switch *sw);
void tb_switch_xhci_disconnect(struct tb_switch *sw);

int tb_port_state(struct tb_port *port);
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
int tb_port_unlock(struct tb_port *port);
int tb_port_enable(struct tb_port *port);
int tb_port_disable(struct tb_port *port);
int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_in_hopid(struct tb_port *port, int hopid);
int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_out_hopid(struct tb_port *port, int hopid);
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev);

static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
{
	return tb_port_is_null(port) && port->sw->credit_allocation;
}

/**
 * tb_for_each_port_on_path() - Iterate over each port on path
 * @src: Source port
 * @dst: Destination port
 * @p: Port used as iterator
 *
 * Walks over each port on path from @src to @dst.
 */
#define tb_for_each_port_on_path(src, dst, p)				\
	for ((p) = tb_next_port_on_path((src), (dst), NULL); (p);	\
	     (p) = tb_next_port_on_path((src), (dst), (p)))

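/*
 * Illustrative sketch: walking the ports between the two ends of a
 * tunnel ("src" and "dst" are made up here):
 *
 *	struct tb_port *p;
 *
 *	tb_for_each_port_on_path(src, dst, p)
 *		tb_port_dbg(p, "on path\n");
 */
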
int tb_port_get_link_speed(struct tb_port *port);
int tb_port_get_link_width(struct tb_port *port);
int tb_port_set_link_width(struct tb_port *port, unsigned int width);
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding);
int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
				int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx);

int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
int tb_port_next_cap(struct tb_port *port, unsigned int offset);
bool tb_port_is_enabled(struct tb_port *port);

bool tb_usb3_port_is_enabled(struct tb_port *port);
int tb_usb3_port_enable(struct tb_port *port, bool enable);

bool tb_pci_port_is_enabled(struct tb_port *port);
int tb_pci_port_enable(struct tb_port *port, bool enable);

int tb_dp_port_hpd_is_active(struct tb_port *port);
int tb_dp_port_hpd_clear(struct tb_port *port);
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx);
bool tb_dp_port_is_enabled(struct tb_port *port);
int tb_dp_port_enable(struct tb_port *port, bool enable);

struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
				 struct tb_port *dst, int dst_hopid,
				 struct tb_port **last, const char *name,
				 bool alloc_hopid);
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
			      struct tb_port *dst, int dst_hopid, int link_nr,
			      const char *name);
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
			  const struct tb_port *port);

/**
 * tb_path_for_each_hop() - Iterate over each hop on path
 * @path: Path whose hops to iterate
 * @hop: Hop used as iterator
 *
 * Iterates over each hop on path.
 */
#define tb_path_for_each_hop(path, hop)					\
	for ((hop) = &(path)->hops[0];					\
	     (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++)

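/*
 * Illustrative sketch (assumed flow): allocate a path, tweak the hops,
 * then activate it. The HopIDs and the name are made up for this
 * example:
 *
 *	struct tb_path *path;
 *	struct tb_path_hop *hop;
 *
 *	path = tb_path_alloc(tb, src, 8, dst, 8, 0, "PCIe Down");
 *	if (!path)
 *		return -ENOMEM;
 *	tb_path_for_each_hop(path, hop)
 *		hop->initial_credits = 1;
 *	if (tb_path_activate(path))
 *		tb_path_free(path);
 */
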
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);

int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
int tb_lc_configure_port(struct tb_port *port);
void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
void tb_lc_unconfigure_xdomain(struct tb_port *port);
int tb_lc_start_lane_initialization(struct tb_port *port);
bool tb_lc_is_clx_supported(struct tb_port *port);
bool tb_lc_is_usb_plugged(struct tb_port *port);
bool tb_lc_is_xhci_connected(struct tb_port *port);
int tb_lc_xhci_connect(struct tb_port *port);
void tb_lc_xhci_disconnect(struct tb_port *port);
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
int tb_lc_set_sleep(struct tb_switch *sw);
bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);
int tb_lc_force_power(struct tb_switch *sw);

static inline int tb_route_length(u64 route)
{
	return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}

/**
 * tb_downstream_route() - get route to downstream switch
 *
 * Port must not be the upstream port (otherwise a loop is created).
 *
 * Return: Returns a route to the switch behind @port.
 */
static inline u64 tb_downstream_route(struct tb_port *port)
{
	return tb_route(port->sw)
	       | ((u64) port->port << (port->sw->config.depth * 8));
}

bool tb_is_xdomain_enabled(void);
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size);
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid);
void tb_xdomain_add(struct tb_xdomain *xd);
void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth);

static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
	return tb_to_switch(xd->dev.parent);
}

int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
			size_t size);
int tb_retimer_scan(struct tb_port *port, bool add);
void tb_retimer_remove_all(struct tb_port *port);

static inline bool tb_is_retimer(const struct device *dev)
{
	return dev->type == &tb_retimer_type;
}

static inline struct tb_retimer *tb_to_retimer(struct device *dev)
{
	if (tb_is_retimer(dev))
		return container_of(dev, struct tb_retimer, dev);
	return NULL;
}

int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size);
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
int usb4_switch_set_sleep(struct tb_switch *sw);
int usb4_switch_nvm_sector_size(struct tb_switch *sw);
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size);
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address);
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size);
int usb4_switch_nvm_authenticate(struct tb_switch *sw);
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status);
int usb4_switch_credits_init(struct tb_switch *sw);
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
					  const struct tb_port *port);
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port);
int usb4_switch_add_ports(struct tb_switch *sw);
void usb4_switch_remove_ports(struct tb_switch *sw);

int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
void usb4_port_unconfigure_xdomain(struct tb_port *port);
int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
bool usb4_port_clx_supported(struct tb_port *port);
int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
			unsigned int ber_level, bool timing, bool right_high,
			u32 *results);
int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
			bool right_high, u32 counter);
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);

int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size);
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size);
int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
				     unsigned int address);
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index,
				unsigned int address, const void *buf,
				size_t size);
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status);
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size);

int usb4_usb3_port_max_link_rate(struct tb_port *port);
int usb4_usb3_port_actual_link_rate(struct tb_port *port);
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw);
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw);
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw);

int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id);
bool usb4_dp_port_bw_mode_supported(struct tb_port *port);
bool usb4_dp_port_bw_mode_enabled(struct tb_port *port);
int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported);
int usb4_dp_port_group_id(struct tb_port *port);
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id);
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes);
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes);
int usb4_dp_port_granularity(struct tb_port *port);
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity);
int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw);
int usb4_dp_port_allocated_bw(struct tb_port *port);
int usb4_dp_port_allocate_bw(struct tb_port *port, int bw);
int usb4_dp_port_requested_bw(struct tb_port *port);

static inline bool tb_is_usb4_port_device(const struct device *dev)
{
	return dev->type == &usb4_port_device_type;
}

static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev)
{
	if (tb_is_usb4_port_device(dev))
		return container_of(dev, struct usb4_port, dev);
	return NULL;
}

struct usb4_port *usb4_port_device_add(struct tb_port *port);
void usb4_port_device_remove(struct usb4_port *usb4);
int usb4_port_device_resume(struct usb4_port *usb4);

void tb_check_quirks(struct tb_switch *sw);

#ifdef CONFIG_ACPI
void tb_acpi_add_links(struct tb_nhi *nhi);

bool tb_acpi_is_native(void);
bool tb_acpi_may_tunnel_usb3(void);
bool tb_acpi_may_tunnel_dp(void);
bool tb_acpi_may_tunnel_pcie(void);
bool tb_acpi_is_xdomain_allowed(void);

int tb_acpi_init(void);
void tb_acpi_exit(void);
int tb_acpi_power_on_retimers(struct tb_port *port);
int tb_acpi_power_off_retimers(struct tb_port *port);
#else
static inline void tb_acpi_add_links(struct tb_nhi *nhi) { }

static inline bool tb_acpi_is_native(void) { return true; }
static inline bool tb_acpi_may_tunnel_usb3(void) { return true; }
static inline bool tb_acpi_may_tunnel_dp(void) { return true; }
static inline bool tb_acpi_may_tunnel_pcie(void) { return true; }
static inline bool tb_acpi_is_xdomain_allowed(void) { return true; }

static inline int tb_acpi_init(void) { return 0; }
static inline void tb_acpi_exit(void) { }
static inline int tb_acpi_power_on_retimers(struct tb_port *port) { return 0; }
static inline int tb_acpi_power_off_retimers(struct tb_port *port) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS
void tb_debugfs_init(void);
void tb_debugfs_exit(void);
void tb_switch_debugfs_init(struct tb_switch *sw);
void tb_switch_debugfs_remove(struct tb_switch *sw);
void tb_xdomain_debugfs_init(struct tb_xdomain *xd);
void tb_xdomain_debugfs_remove(struct tb_xdomain *xd);
void tb_service_debugfs_init(struct tb_service *svc);
void tb_service_debugfs_remove(struct tb_service *svc);
#else
static inline void tb_debugfs_init(void) { }
static inline void tb_debugfs_exit(void) { }
static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { }
static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { }
static inline void tb_service_debugfs_init(struct tb_service *svc) { }
static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
#endif

#endif