/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt service API
 *
 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/types.h>

struct fwnode_handle;
struct device;

#if IS_REACHABLE(CONFIG_USB4)

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

enum tb_cfg_pkg_type {
        TB_CFG_PKG_READ = 1,
        TB_CFG_PKG_WRITE = 2,
        TB_CFG_PKG_ERROR = 3,
        TB_CFG_PKG_NOTIFY_ACK = 4,
        TB_CFG_PKG_EVENT = 5,
        TB_CFG_PKG_XDOMAIN_REQ = 6,
        TB_CFG_PKG_XDOMAIN_RESP = 7,
        TB_CFG_PKG_OVERRIDE = 8,
        TB_CFG_PKG_RESET = 9,
        TB_CFG_PKG_ICM_EVENT = 10,
        TB_CFG_PKG_ICM_CMD = 11,
        TB_CFG_PKG_ICM_RESP = 12,
};

/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel DisplayPort (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *                       Thunderbolt dock (and DisplayPort). All PCIe
 *                       links downstream of the dock are removed.
 * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when the
 *                      PCIe tunneling is disabled from the BIOS.
 */
enum tb_security_level {
        TB_SECURITY_NONE,
        TB_SECURITY_USER,
        TB_SECURITY_SECURE,
        TB_SECURITY_DPONLY,
        TB_SECURITY_USBONLY,
        TB_SECURITY_NOPCIE,
};

/**
 * struct tb - main thunderbolt bus structure
 * @dev: Domain device
 * @lock: Big lock. Must be held when accessing any struct
 *        tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data
 */
struct tb {
        struct device dev;
        struct mutex lock;
        struct tb_nhi *nhi;
        struct tb_ctl *ctl;
        struct workqueue_struct *wq;
        struct tb_switch *root_switch;
        const struct tb_cm_ops *cm_ops;
        int index;
        enum tb_security_level security_level;
        size_t nboot_acl;
        unsigned long privdata[];
};

extern const struct bus_type tb_bus_type;
extern const struct device_type tb_service_type;
extern const struct device_type tb_xdomain_type;

#define TB_LINKS_PER_PHY_PORT   2

static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
        return (link - 1) / TB_LINKS_PER_PHY_PORT;
}

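/*
 * Links are numbered starting from 1 and each physical port carries
 * %TB_LINKS_PER_PHY_PORT links, so for example links 1 and 2 both map
 * to physical port 0, and links 3 and 4 map to physical port 1.
 */
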
/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 *
 * The user needs to provide serialization if needed.
 */
struct tb_property_dir {
        const uuid_t *uuid;
        struct list_head properties;
};

enum tb_property_type {
        TB_PROPERTY_TYPE_UNKNOWN = 0x00,
        TB_PROPERTY_TYPE_DIRECTORY = 0x44,
        TB_PROPERTY_TYPE_DATA = 0x64,
        TB_PROPERTY_TYPE_TEXT = 0x74,
        TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE    8

/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always terminated).
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
 */
struct tb_property {
        struct list_head list;
        char key[TB_PROPERTY_KEY_SIZE + 1];
        enum tb_property_type type;
        size_t length;
        union {
                struct tb_property_dir *dir;
                u8 *data;
                char *text;
                u32 immediate;
        } value;
};

struct tb_property_dir *tb_property_parse_dir(const u32 *block,
                                              size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
                               size_t block_len);
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
                              u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
                         const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
                         const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
                        struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
                                     const char *key, enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
                                         struct tb_property *prev);

#define tb_property_for_each(dir, property)                     \
        for (property = tb_property_get_next(dir, NULL);        \
             property;                                          \
             property = tb_property_get_next(dir, property))

int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);

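/*
 * A minimal usage sketch (the "example" key, the UUID value and the
 * property keys below are illustrative, not defined by this API): a
 * service driver can publish its protocol details under the root
 * directory roughly like this.
 */
#if 0
static const uuid_t example_uuid =
        UUID_INIT(0x12345678, 0x1234, 0x1234,
                  0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34);

static int example_publish_properties(void)
{
        struct tb_property_dir *dir;
        int ret;

        /* Create a directory identified by the protocol UUID */
        dir = tb_property_create_dir(&example_uuid);
        if (!dir)
                return -ENOMEM;

        ret = tb_property_add_immediate(dir, "prtcid", 1);
        if (!ret)
                ret = tb_property_add_text(dir, "device", "Example");
        if (!ret)
                /* Hook it into the local XDomain properties */
                ret = tb_register_property_dir("example", dir);
        if (ret)
                tb_property_free_dir(dir);
        return ret;
}
#endif
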
/**
 * enum tb_link_width - Thunderbolt/USB4 link width
 * @TB_LINK_WIDTH_SINGLE: Single lane link
 * @TB_LINK_WIDTH_DUAL: Dual lane symmetric link
 * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters
 * @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers
 */
enum tb_link_width {
        TB_LINK_WIDTH_SINGLE = BIT(0),
        TB_LINK_WIDTH_DUAL = BIT(1),
        TB_LINK_WIDTH_ASYM_TX = BIT(2),
        TB_LINK_WIDTH_ASYM_RX = BIT(3),
};

/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (host)
 * @local_uuid: Cached local UUID
 * @route: Route string through which the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @local_max_hopid: Maximum input HopID of this host
 * @remote_max_hopid: Maximum input HopID of the remote host
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the downstream facing link
 * @link_usb4: Downstream link is USB4
 * @is_unplugged: The XDomain is unplugged
 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
 *              queried first
 * @service_ids: Used to generate IDs for the services
 * @in_hopids: Input HopIDs for DMA tunneling
 * @out_hopids: Output HopIDs for DMA tunneling
 * @local_property_block: Local block of properties
 * @local_property_block_gen: Generation of @local_property_block
 * @local_property_block_len: Length of the @local_property_block in dwords
 * @remote_properties: Properties exported by the remote domain
 * @remote_property_block_gen: Generation of @remote_properties
 * @state: Next XDomain discovery state to run
 * @state_work: Work used to run the next state
 * @state_retries: Number of retries remaining for the state
 * @properties_changed_work: Work used to notify the remote domain that
 *                           our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *                              changed notification
 * @bonding_possible: True if lane bonding is possible on the local side
 * @target_link_width: Target link width from the remote host
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain where the remote domain is connected (ICM only)
 *
 * This structure represents connection across two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 *
 * Service drivers may access this structure if they need to enumerate
 * non-standard properties but they need to hold @lock when doing so
 * because properties can be changed asynchronously in response to
 * changes in the remote domain.
 */
struct tb_xdomain {
        struct device dev;
        struct tb *tb;
        uuid_t *remote_uuid;
        const uuid_t *local_uuid;
        u64 route;
        u16 vendor;
        u16 device;
        unsigned int local_max_hopid;
        unsigned int remote_max_hopid;
        struct mutex lock;
        const char *vendor_name;
        const char *device_name;
        unsigned int link_speed;
        enum tb_link_width link_width;
        bool link_usb4;
        bool is_unplugged;
        bool needs_uuid;
        struct ida service_ids;
        struct ida in_hopids;
        struct ida out_hopids;
        u32 *local_property_block;
        u32 local_property_block_gen;
        u32 local_property_block_len;
        struct tb_property_dir *remote_properties;
        u32 remote_property_block_gen;
        int state;
        struct delayed_work state_work;
        int state_retries;
        struct delayed_work properties_changed_work;
        int properties_changed_retries;
        bool bonding_possible;
        u8 target_link_width;
        u8 link;
        u8 depth;
};

int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
                            int transmit_ring, int receive_path,
                            int receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
                             int transmit_ring, int receive_path,
                             int receive_ring);

static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
{
        return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);
}

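/*
 * Sketch of a DMA tunnel setup (assumptions: passing %-1 to the HopID
 * allocators to get the next available HopID, and the pairing of paths
 * to rings, follow the usage in existing service drivers; the remote
 * HopID would come from a protocol specific handshake).
 */
#if 0
static int example_enable_dma(struct tb_xdomain *xd, struct tb_ring *tx,
                              struct tb_ring *rx, int remote_path)
{
        int local_path, ret;

        local_path = tb_xdomain_alloc_in_hopid(xd, -1);
        if (local_path < 0)
                return local_path;

        ret = tb_xdomain_enable_paths(xd, local_path, rx->hop,
                                      remote_path, tx->hop);
        if (ret)
                tb_xdomain_release_in_hopid(xd, local_path);
        return ret;
}
#endif
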
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);

static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
        struct tb_xdomain *xd;

        mutex_lock(&tb->lock);
        xd = tb_xdomain_find_by_uuid(tb, uuid);
        mutex_unlock(&tb->lock);

        return xd;
}

static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
        struct tb_xdomain *xd;

        mutex_lock(&tb->lock);
        xd = tb_xdomain_find_by_route(tb, route);
        mutex_unlock(&tb->lock);

        return xd;
}

static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
        if (xd)
                get_device(&xd->dev);
        return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
        if (xd)
                put_device(&xd->dev);
}

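/*
 * Sketch of the reference handling (assumption, based on the get/put
 * helpers above: a successful find returns the XDomain with a device
 * reference held, so the caller drops it with tb_xdomain_put()).
 */
#if 0
static void example_use_xdomain(struct tb *tb, u64 route)
{
        struct tb_xdomain *xd;

        xd = tb_xdomain_find_by_route_locked(tb, route);
        if (!xd)
                return;

        mutex_lock(&xd->lock);
        /* vendor_name/device_name and friends are stable while @lock is held */
        mutex_unlock(&xd->lock);

        tb_xdomain_put(xd);
}
#endif
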
static inline bool tb_is_xdomain(const struct device *dev)
{
        return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
        if (tb_is_xdomain(dev))
                return container_of(dev, struct tb_xdomain, dev);
        return NULL;
}

int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
                        size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
                       size_t request_size, enum tb_cfg_pkg_type request_type,
                       void *response, size_t response_size,
                       enum tb_cfg_pkg_type response_type,
                       unsigned int timeout_msec);

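/*
 * Sketch of a request/response exchange (assumptions: the message
 * structures and the 1000 ms timeout are illustrative; a real protocol
 * defines its own wire format carried in %TB_CFG_PKG_XDOMAIN_REQ and
 * %TB_CFG_PKG_XDOMAIN_RESP packages).
 */
#if 0
struct example_req {
        u32 opcode;
} __packed;

struct example_resp {
        u32 status;
} __packed;

static int example_send_request(struct tb_xdomain *xd, u32 opcode,
                                struct example_resp *resp)
{
        struct example_req req = { .opcode = opcode };

        return tb_xdomain_request(xd, &req, sizeof(req),
                                  TB_CFG_PKG_XDOMAIN_REQ,
                                  resp, sizeof(*resp),
                                  TB_CFG_PKG_XDOMAIN_RESP, 1000);
}
#endif
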
/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *            here tells the XDomain core that the message was handled
 *            by this handler and should not be forwarded to other
 *            handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the
 * XDomain discovery protocol UUID cannot be registered since it is
 * handled by the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
 */
struct tb_protocol_handler {
        const uuid_t *uuid;
        int (*callback)(const void *buf, size_t size, void *data);
        void *data;
        struct list_head list;
};

int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);

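/*
 * Sketch of a protocol handler (assumptions: example_uuid and the
 * message layout checked in the callback are hypothetical; the
 * callback runs for every incoming XDomain request carrying the
 * registered UUID, so it must verify the message really targets this
 * service).
 */
#if 0
static int example_protocol_cb(const void *buf, size_t size, void *data)
{
        if (size < sizeof(struct example_req))
                return 0;       /* not ours, let other handlers look at it */

        /* Handle the request, possibly replying with tb_xdomain_response() */
        return 1;               /* consumed, do not forward */
}

static struct tb_protocol_handler example_handler = {
        .uuid = &example_uuid,
        .callback = example_protocol_cb,
};

/* At service probe time: tb_register_protocol_handler(&example_handler); */
#endif
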
/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 * @debugfs_dir: Pointer to the service debugfs directory. Always created
 *               when debugfs is enabled. Can be used by service drivers to
 *               add their own entries under the service.
 *
 * Each domain exposes a set of services it supports as a collection of
 * properties. For each service there will be one corresponding
 * &struct tb_service. Service drivers are bound to these.
 */
struct tb_service {
        struct device dev;
        int id;
        const char *key;
        u32 prtcid;
        u32 prtcvers;
        u32 prtcrevs;
        u32 prtcstns;
        struct dentry *debugfs_dir;
};

static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
        if (svc)
                get_device(&svc->dev);
        return svc;
}

static inline void tb_service_put(struct tb_service *svc)
{
        if (svc)
                put_device(&svc->dev);
}

static inline bool tb_is_service(const struct device *dev)
{
        return dev->type == &tb_service_type;
}

static inline struct tb_service *tb_to_service(struct device *dev)
{
        if (tb_is_service(dev))
                return container_of(dev, struct tb_service, dev);
        return NULL;
}

/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
        struct device_driver driver;
        int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
        void (*remove)(struct tb_service *svc);
        void (*shutdown)(struct tb_service *svc);
        const struct tb_service_id *id_table;
};

#define TB_SERVICE(key, id)                       \
        .match_flags = TBSVC_MATCH_PROTOCOL_KEY | \
                       TBSVC_MATCH_PROTOCOL_ID,   \
        .protocol_key = (key),                    \
        .protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);

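/*
 * Sketch of a service driver skeleton (assumptions: the "example"
 * protocol key, protocol ID 1 and struct example_priv are made up for
 * illustration; the overall shape follows how existing service drivers
 * register themselves).
 */
#if 0
static const struct tb_service_id example_ids[] = {
        { TB_SERVICE("example", 1) },
        { },
};
MODULE_DEVICE_TABLE(tbsvc, example_ids);

static int example_probe(struct tb_service *svc,
                         const struct tb_service_id *id)
{
        struct example_priv *priv;

        priv = devm_kzalloc(&svc->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        tb_service_set_drvdata(svc, priv);
        return 0;
}

static void example_remove(struct tb_service *svc)
{
        struct example_priv *priv = tb_service_get_drvdata(svc);

        /* Tear down anything set up in probe using @priv */
}

static struct tb_service_driver example_driver = {
        .driver = {
                .name = "example",
        },
        .probe = example_probe,
        .remove = example_remove,
        .id_table = example_ids,
};
module_driver(example_driver, tb_register_service_driver,
              tb_unregister_service_driver);
#endif
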
static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
        return dev_get_drvdata(&svc->dev);
}

static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
        dev_set_drvdata(&svc->dev, data);
}

static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
        return tb_to_xdomain(svc->dev.parent);
}

/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *        interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @ops: NHI specific optional ops
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *              this flag is set, avoid touching the hardware anymore.
 * @iommu_dma_protection: An IOMMU will isolate external-facing ports.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *                  MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 * @quirks: NHI specific quirks if any
 */
struct tb_nhi {
        spinlock_t lock;
        struct pci_dev *pdev;
        const struct tb_nhi_ops *ops;
        void __iomem *iobase;
        struct tb_ring **tx_rings;
        struct tb_ring **rx_rings;
        struct ida msix_ida;
        bool going_away;
        bool iommu_dma_protection;
        struct work_struct interrupt_work;
        u32 hop_count;
        unsigned long quirks;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *        nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of the descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
 *              RX ring. For TX ring this should be set to %0.
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *              polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
        spinlock_t lock;
        struct tb_nhi *nhi;
        int size;
        int hop;
        int head;
        int tail;
        struct ring_desc *descriptors;
        dma_addr_t descriptors_dma;
        struct list_head queue;
        struct list_head in_flight;
        struct work_struct work;
        bool is_tx:1;
        bool running:1;
        int irq;
        u8 vector;
        unsigned int flags;
        int e2e_tx_hop;
        u16 sof_mask;
        u16 eof_mask;
        void (*start_poll)(void *data);
        void *poll_data;
};

/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND    BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME         BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E           BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
        RING_DESC_ISOCH = 0x1,
        RING_DESC_CRC_ERROR = 0x1,
        RING_DESC_COMPLETED = 0x2,
        RING_DESC_POSTED = 0x4,
        RING_DESC_BUFFER_OVERRUN = 0x4,
        RING_DESC_INTERRUPT = 0x8,
};

/**
 * struct ring_frame - For use with tb_ring_rx()/tb_ring_tx()
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
        dma_addr_t buffer_phy;
        ring_cb callback;
        struct list_head list;
        u32 size:12;
        u32 flags:12;
        u32 eof:4;
        u32 sof:4;
};

/* Minimum size for tb_ring_rx() */
#define TB_FRAME_SIZE   0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
                                 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
                                 unsigned int flags, int e2e_tx_hop,
                                 u16 sof_mask, u16 eof_mask,
                                 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

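/*
 * Sketch of ring setup in frame mode (assumptions: passing %-1 as @hop
 * to get the next free ring follows existing users; the ring size and
 * the SOF/EOF masks are illustrative).
 */
#if 0
static int example_alloc_rings(struct tb_nhi *nhi, struct tb_ring **tx,
                               struct tb_ring **rx)
{
        *tx = tb_ring_alloc_tx(nhi, -1, 16, RING_FLAG_FRAME);
        if (!*tx)
                return -ENOMEM;

        *rx = tb_ring_alloc_rx(nhi, -1, 16, RING_FLAG_FRAME, 0,
                               0xffff, 0xffff, NULL, NULL);
        if (!*rx) {
                tb_ring_free(*tx);
                return -ENOMEM;
        }

        tb_ring_start(*tx);
        tb_ring_start(*rx);
        return 0;
}
#endif
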
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy has to be set. The buffer must contain at least
 * %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
        WARN_ON(ring->is_tx);
        return __tb_ring_enqueue(ring, frame);
}

/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy, @frame->size, @frame->eof and @frame->sof have
 * to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
        WARN_ON(!ring->is_tx);
        return __tb_ring_enqueue(ring, frame);
}

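/*
 * Sketch of queueing a frame for transmit (assumptions: struct
 * example_frame embedding &struct ring_frame and the completion
 * handling are illustrative; the buffer is DMA mapped with the device
 * returned by tb_ring_dma_device() declared below).
 */
#if 0
struct example_frame {
        struct ring_frame frame;
        void *buf;
};

static void example_tx_callback(struct tb_ring *ring,
                                struct ring_frame *frame, bool canceled)
{
        struct example_frame *ef = container_of(frame, typeof(*ef), frame);

        dma_unmap_single(tb_ring_dma_device(ring), frame->buffer_phy,
                         frame->size, DMA_TO_DEVICE);
        /* Release @ef here; if @canceled the ring was stopped */
}

static int example_xmit(struct tb_ring *ring, struct example_frame *ef,
                        size_t len)
{
        struct device *dma_dev = tb_ring_dma_device(ring);

        ef->frame.buffer_phy = dma_map_single(dma_dev, ef->buf, len,
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, ef->frame.buffer_phy))
                return -ENOMEM;

        ef->frame.callback = example_tx_callback;
        ef->frame.size = len;
        ef->frame.sof = 0;      /* protocol defined PDF values */
        ef->frame.eof = 0;

        return tb_ring_tx(ring, &ef->frame);
}
#endif
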
/* Used only when the ring is in polling mode */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);

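/*
 * Sketch of NAPI style polling (assumptions: the @start_poll callback
 * was passed to tb_ring_alloc_rx() with a driver private pointer as
 * @poll_data; struct example_priv and its members are hypothetical,
 * and the budget handling mirrors what a network driver over
 * Thunderbolt could do).
 */
#if 0
static void example_start_poll(void *data)
{
        struct example_priv *priv = data;

        /* Ring interrupt is now masked; defer to the polling context */
        napi_schedule(&priv->napi);
}

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, typeof(*priv), napi);
        int done = 0;

        while (done < budget) {
                struct ring_frame *frame = tb_ring_poll(priv->rx_ring);

                if (!frame)
                        break;
                /* Process the received frame here */
                done++;
        }

        if (done < budget && napi_complete_done(napi, done))
                /* Re-enable the ring interrupt */
                tb_ring_poll_complete(priv->rx_ring);

        return done;
}
#endif
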
/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
        return &ring->nhi->pdev->dev;
}

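/*
 * Sketch of mapping an RX buffer (assumption: the buffer handling is
 * illustrative; RX buffers must be at least %TB_FRAME_SIZE bytes).
 */
#if 0
static int example_map_rx_buffer(struct tb_ring *ring,
                                 struct ring_frame *frame, void *buf)
{
        struct device *dma_dev = tb_ring_dma_device(ring);

        frame->buffer_phy = dma_map_single(dma_dev, buf, TB_FRAME_SIZE,
                                           DMA_FROM_DEVICE);
        if (dma_mapping_error(dma_dev, frame->buffer_phy))
                return -ENOMEM;

        return tb_ring_rx(ring, frame);
}
#endif
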
bool usb4_usb3_port_match(struct device *usb4_port_dev,
                          const struct fwnode_handle *usb3_port_fwnode);

#else /* CONFIG_USB4 */
static inline bool usb4_usb3_port_match(struct device *usb4_port_dev,
                                        const struct fwnode_handle *usb3_port_fwnode)
{
        return false;
}
#endif /* CONFIG_USB4 */

#endif /* THUNDERBOLT_H_ */