Commit | Line | Data |
---|---|---|
fd3b339c | 1 | // SPDX-License-Identifier: GPL-2.0 |
f67cf491 MW |
2 | /* |
3 | * Internal Thunderbolt Connection Manager. This is a firmware running on | |
4 | * the Thunderbolt host controller performing most of the low-level | |
5 | * handling. | |
6 | * | |
7 | * Copyright (C) 2017, Intel Corporation | |
8 | * Authors: Michael Jamet <michael.jamet@intel.com> | |
9 | * Mika Westerberg <mika.westerberg@linux.intel.com> | |
f67cf491 MW |
10 | */ |
11 | ||
12 | #include <linux/delay.h> | |
f67cf491 MW |
13 | #include <linux/mutex.h> |
14 | #include <linux/pci.h> | |
2d8ff0b5 | 15 | #include <linux/pm_runtime.h> |
630b3aff | 16 | #include <linux/platform_data/x86/apple.h> |
f67cf491 MW |
17 | #include <linux/sizes.h> |
18 | #include <linux/slab.h> | |
19 | #include <linux/workqueue.h> | |
20 | ||
21 | #include "ctl.h" | |
22 | #include "nhi_regs.h" | |
23 | #include "tb.h" | |
24 | ||
25 | #define PCIE2CIO_CMD 0x30 | |
26 | #define PCIE2CIO_CMD_TIMEOUT BIT(31) | |
27 | #define PCIE2CIO_CMD_START BIT(30) | |
28 | #define PCIE2CIO_CMD_WRITE BIT(21) | |
29 | #define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19) | |
30 | #define PCIE2CIO_CMD_CS_SHIFT 19 | |
31 | #define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13) | |
32 | #define PCIE2CIO_CMD_PORT_SHIFT 13 | |
33 | ||
34 | #define PCIE2CIO_WRDATA 0x34 | |
35 | #define PCIE2CIO_RDDATA 0x38 | |
36 | ||
37 | #define PHY_PORT_CS1 0x37 | |
38 | #define PHY_PORT_CS1_LINK_DISABLE BIT(14) | |
39 | #define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26) | |
40 | #define PHY_PORT_CS1_LINK_STATE_SHIFT 26 | |
41 | ||
0b0a0bd0 MW |
42 | #define ICM_TIMEOUT 5000 /* ms */ |
43 | #define ICM_APPROVE_TIMEOUT 10000 /* ms */ | |
f67cf491 MW |
44 | #define ICM_MAX_LINK 4 |
45 | #define ICM_MAX_DEPTH 6 | |
46 | ||
47 | /** | |
48 | * struct icm - Internal connection manager private data | |
49 | * @request_lock: Makes sure only one message is send to ICM at time | |
50 | * @rescan_work: Work used to rescan the surviving switches after resume | |
51 | * @upstream_port: Pointer to the PCIe upstream port this host | |
52 | * controller is connected. This is only set for systems | |
53 | * where ICM needs to be started manually | |
54 | * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides | |
55 | * (only set when @upstream_port is not %NULL) | |
e6b245cc | 56 | * @safe_mode: ICM is in safe mode |
9aaa3b8b | 57 | * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) |
2d8ff0b5 | 58 | * @rpm: Does the controller support runtime PM (RTD3) |
f67cf491 MW |
59 | * @is_supported: Checks if we can support ICM on this controller |
60 | * @get_mode: Read and return the ICM firmware mode (optional) | |
61 | * @get_route: Find a route string for given switch | |
d04522fa | 62 | * @save_devices: Ask ICM to save devices to ACL when suspending (optional) |
3080e197 | 63 | * @driver_ready: Send driver ready message to ICM |
f67cf491 MW |
64 | * @device_connected: Handle device connected ICM message |
65 | * @device_disconnected: Handle device disconnected ICM message | |
d1ff7024 MW |
66 | * @xdomain_connected - Handle XDomain connected ICM message |
67 | * @xdomain_disconnected - Handle XDomain disconnected ICM message | |
f67cf491 MW |
68 | */ |
69 | struct icm { | |
70 | struct mutex request_lock; | |
71 | struct delayed_work rescan_work; | |
72 | struct pci_dev *upstream_port; | |
9aaa3b8b | 73 | size_t max_boot_acl; |
f67cf491 | 74 | int vnd_cap; |
e6b245cc | 75 | bool safe_mode; |
2d8ff0b5 | 76 | bool rpm; |
f67cf491 MW |
77 | bool (*is_supported)(struct tb *tb); |
78 | int (*get_mode)(struct tb *tb); | |
79 | int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route); | |
d04522fa | 80 | void (*save_devices)(struct tb *tb); |
3080e197 | 81 | int (*driver_ready)(struct tb *tb, |
9aaa3b8b | 82 | enum tb_security_level *security_level, |
2d8ff0b5 | 83 | size_t *nboot_acl, bool *rpm); |
f67cf491 MW |
84 | void (*device_connected)(struct tb *tb, |
85 | const struct icm_pkg_header *hdr); | |
86 | void (*device_disconnected)(struct tb *tb, | |
87 | const struct icm_pkg_header *hdr); | |
d1ff7024 MW |
88 | void (*xdomain_connected)(struct tb *tb, |
89 | const struct icm_pkg_header *hdr); | |
90 | void (*xdomain_disconnected)(struct tb *tb, | |
91 | const struct icm_pkg_header *hdr); | |
f67cf491 MW |
92 | }; |
93 | ||
94 | struct icm_notification { | |
95 | struct work_struct work; | |
96 | struct icm_pkg_header *pkg; | |
97 | struct tb *tb; | |
98 | }; | |
99 | ||
2d8ff0b5 MW |
100 | struct ep_name_entry { |
101 | u8 len; | |
102 | u8 type; | |
103 | u8 data[0]; | |
104 | }; | |
105 | ||
106 | #define EP_NAME_INTEL_VSS 0x10 | |
107 | ||
108 | /* Intel Vendor specific structure */ | |
109 | struct intel_vss { | |
110 | u16 vendor; | |
111 | u16 model; | |
112 | u8 mc; | |
113 | u8 flags; | |
114 | u16 pci_devid; | |
115 | u32 nvm_version; | |
116 | }; | |
117 | ||
118 | #define INTEL_VSS_FLAGS_RTD3 BIT(0) | |
119 | ||
120 | static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size) | |
121 | { | |
122 | const void *end = ep_name + size; | |
123 | ||
124 | while (ep_name < end) { | |
125 | const struct ep_name_entry *ep = ep_name; | |
126 | ||
127 | if (!ep->len) | |
128 | break; | |
129 | if (ep_name + ep->len > end) | |
130 | break; | |
131 | ||
132 | if (ep->type == EP_NAME_INTEL_VSS) | |
133 | return (const struct intel_vss *)ep->data; | |
134 | ||
135 | ep_name += ep->len; | |
136 | } | |
137 | ||
138 | return NULL; | |
139 | } | |
140 | ||
f67cf491 MW |
141 | static inline struct tb *icm_to_tb(struct icm *icm) |
142 | { | |
143 | return ((void *)icm - sizeof(struct tb)); | |
144 | } | |
145 | ||
146 | static inline u8 phy_port_from_route(u64 route, u8 depth) | |
147 | { | |
d1ff7024 MW |
148 | u8 link; |
149 | ||
150 | link = depth ? route >> ((depth - 1) * 8) : route; | |
151 | return tb_phy_port_from_link(link); | |
f67cf491 MW |
152 | } |
153 | ||
154 | static inline u8 dual_link_from_link(u8 link) | |
155 | { | |
156 | return link ? ((link - 1) ^ 0x01) + 1 : 0; | |
157 | } | |
158 | ||
159 | static inline u64 get_route(u32 route_hi, u32 route_lo) | |
160 | { | |
161 | return (u64)route_hi << 32 | route_lo; | |
162 | } | |
163 | ||
4bac471d RM |
164 | static inline u64 get_parent_route(u64 route) |
165 | { | |
166 | int depth = tb_route_length(route); | |
167 | return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; | |
168 | } | |
169 | ||
f67cf491 MW |
170 | static bool icm_match(const struct tb_cfg_request *req, |
171 | const struct ctl_pkg *pkg) | |
172 | { | |
173 | const struct icm_pkg_header *res_hdr = pkg->buffer; | |
174 | const struct icm_pkg_header *req_hdr = req->request; | |
175 | ||
176 | if (pkg->frame.eof != req->response_type) | |
177 | return false; | |
178 | if (res_hdr->code != req_hdr->code) | |
179 | return false; | |
180 | ||
181 | return true; | |
182 | } | |
183 | ||
184 | static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) | |
185 | { | |
186 | const struct icm_pkg_header *hdr = pkg->buffer; | |
187 | ||
188 | if (hdr->packet_id < req->npackets) { | |
189 | size_t offset = hdr->packet_id * req->response_size; | |
190 | ||
191 | memcpy(req->response + offset, pkg->buffer, req->response_size); | |
192 | } | |
193 | ||
194 | return hdr->packet_id == hdr->total_packets - 1; | |
195 | } | |
196 | ||
197 | static int icm_request(struct tb *tb, const void *request, size_t request_size, | |
198 | void *response, size_t response_size, size_t npackets, | |
199 | unsigned int timeout_msec) | |
200 | { | |
201 | struct icm *icm = tb_priv(tb); | |
202 | int retries = 3; | |
203 | ||
204 | do { | |
205 | struct tb_cfg_request *req; | |
206 | struct tb_cfg_result res; | |
207 | ||
208 | req = tb_cfg_request_alloc(); | |
209 | if (!req) | |
210 | return -ENOMEM; | |
211 | ||
212 | req->match = icm_match; | |
213 | req->copy = icm_copy; | |
214 | req->request = request; | |
215 | req->request_size = request_size; | |
216 | req->request_type = TB_CFG_PKG_ICM_CMD; | |
217 | req->response = response; | |
218 | req->npackets = npackets; | |
219 | req->response_size = response_size; | |
220 | req->response_type = TB_CFG_PKG_ICM_RESP; | |
221 | ||
222 | mutex_lock(&icm->request_lock); | |
223 | res = tb_cfg_request_sync(tb->ctl, req, timeout_msec); | |
224 | mutex_unlock(&icm->request_lock); | |
225 | ||
226 | tb_cfg_request_put(req); | |
227 | ||
228 | if (res.err != -ETIMEDOUT) | |
229 | return res.err == 1 ? -EIO : res.err; | |
230 | ||
231 | usleep_range(20, 50); | |
232 | } while (retries--); | |
233 | ||
234 | return -ETIMEDOUT; | |
235 | } | |
236 | ||
237 | static bool icm_fr_is_supported(struct tb *tb) | |
238 | { | |
630b3aff | 239 | return !x86_apple_machine; |
f67cf491 MW |
240 | } |
241 | ||
242 | static inline int icm_fr_get_switch_index(u32 port) | |
243 | { | |
244 | int index; | |
245 | ||
246 | if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT) | |
247 | return 0; | |
248 | ||
249 | index = port >> ICM_PORT_INDEX_SHIFT; | |
250 | return index != 0xff ? index : 0; | |
251 | } | |
252 | ||
253 | static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) | |
254 | { | |
255 | struct icm_fr_pkg_get_topology_response *switches, *sw; | |
256 | struct icm_fr_pkg_get_topology request = { | |
257 | .hdr = { .code = ICM_GET_TOPOLOGY }, | |
258 | }; | |
259 | size_t npackets = ICM_GET_TOPOLOGY_PACKETS; | |
260 | int ret, index; | |
261 | u8 i; | |
262 | ||
263 | switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL); | |
264 | if (!switches) | |
265 | return -ENOMEM; | |
266 | ||
267 | ret = icm_request(tb, &request, sizeof(request), switches, | |
268 | sizeof(*switches), npackets, ICM_TIMEOUT); | |
269 | if (ret) | |
270 | goto err_free; | |
271 | ||
272 | sw = &switches[0]; | |
273 | index = icm_fr_get_switch_index(sw->ports[link]); | |
274 | if (!index) { | |
275 | ret = -ENODEV; | |
276 | goto err_free; | |
277 | } | |
278 | ||
279 | sw = &switches[index]; | |
280 | for (i = 1; i < depth; i++) { | |
281 | unsigned int j; | |
282 | ||
283 | if (!(sw->first_data & ICM_SWITCH_USED)) { | |
284 | ret = -ENODEV; | |
285 | goto err_free; | |
286 | } | |
287 | ||
288 | for (j = 0; j < ARRAY_SIZE(sw->ports); j++) { | |
289 | index = icm_fr_get_switch_index(sw->ports[j]); | |
290 | if (index > sw->switch_index) { | |
291 | sw = &switches[index]; | |
292 | break; | |
293 | } | |
294 | } | |
295 | } | |
296 | ||
297 | *route = get_route(sw->route_hi, sw->route_lo); | |
298 | ||
299 | err_free: | |
300 | kfree(switches); | |
301 | return ret; | |
302 | } | |
303 | ||
d04522fa MW |
304 | static void icm_fr_save_devices(struct tb *tb) |
305 | { | |
306 | nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); | |
307 | } | |
308 | ||
3080e197 | 309 | static int |
9aaa3b8b | 310 | icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level, |
2d8ff0b5 | 311 | size_t *nboot_acl, bool *rpm) |
3080e197 MW |
312 | { |
313 | struct icm_fr_pkg_driver_ready_response reply; | |
314 | struct icm_pkg_driver_ready request = { | |
315 | .hdr.code = ICM_DRIVER_READY, | |
316 | }; | |
317 | int ret; | |
318 | ||
319 | memset(&reply, 0, sizeof(reply)); | |
320 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
321 | 1, ICM_TIMEOUT); | |
322 | if (ret) | |
323 | return ret; | |
324 | ||
325 | if (security_level) | |
326 | *security_level = reply.security_level & ICM_FR_SLEVEL_MASK; | |
327 | ||
328 | return 0; | |
329 | } | |
330 | ||
f67cf491 MW |
331 | static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) |
332 | { | |
333 | struct icm_fr_pkg_approve_device request; | |
334 | struct icm_fr_pkg_approve_device reply; | |
335 | int ret; | |
336 | ||
337 | memset(&request, 0, sizeof(request)); | |
338 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
339 | request.hdr.code = ICM_APPROVE_DEVICE; | |
340 | request.connection_id = sw->connection_id; | |
341 | request.connection_key = sw->connection_key; | |
342 | ||
343 | memset(&reply, 0, sizeof(reply)); | |
344 | /* Use larger timeout as establishing tunnels can take some time */ | |
345 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
0b0a0bd0 | 346 | 1, ICM_APPROVE_TIMEOUT); |
f67cf491 MW |
347 | if (ret) |
348 | return ret; | |
349 | ||
350 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { | |
351 | tb_warn(tb, "PCIe tunnel creation failed\n"); | |
352 | return -EIO; | |
353 | } | |
354 | ||
355 | return 0; | |
356 | } | |
357 | ||
358 | static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) | |
359 | { | |
360 | struct icm_fr_pkg_add_device_key request; | |
361 | struct icm_fr_pkg_add_device_key_response reply; | |
362 | int ret; | |
363 | ||
364 | memset(&request, 0, sizeof(request)); | |
365 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
366 | request.hdr.code = ICM_ADD_DEVICE_KEY; | |
367 | request.connection_id = sw->connection_id; | |
368 | request.connection_key = sw->connection_key; | |
369 | memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); | |
370 | ||
371 | memset(&reply, 0, sizeof(reply)); | |
372 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
373 | 1, ICM_TIMEOUT); | |
374 | if (ret) | |
375 | return ret; | |
376 | ||
377 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { | |
378 | tb_warn(tb, "Adding key to switch failed\n"); | |
379 | return -EIO; | |
380 | } | |
381 | ||
382 | return 0; | |
383 | } | |
384 | ||
385 | static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, | |
386 | const u8 *challenge, u8 *response) | |
387 | { | |
388 | struct icm_fr_pkg_challenge_device request; | |
389 | struct icm_fr_pkg_challenge_device_response reply; | |
390 | int ret; | |
391 | ||
392 | memset(&request, 0, sizeof(request)); | |
393 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
394 | request.hdr.code = ICM_CHALLENGE_DEVICE; | |
395 | request.connection_id = sw->connection_id; | |
396 | request.connection_key = sw->connection_key; | |
397 | memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); | |
398 | ||
399 | memset(&reply, 0, sizeof(reply)); | |
400 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
401 | 1, ICM_TIMEOUT); | |
402 | if (ret) | |
403 | return ret; | |
404 | ||
405 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
406 | return -EKEYREJECTED; | |
407 | if (reply.hdr.flags & ICM_FLAGS_NO_KEY) | |
408 | return -ENOKEY; | |
409 | ||
410 | memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); | |
411 | ||
412 | return 0; | |
413 | } | |
414 | ||
d1ff7024 MW |
415 | static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) |
416 | { | |
417 | struct icm_fr_pkg_approve_xdomain_response reply; | |
418 | struct icm_fr_pkg_approve_xdomain request; | |
419 | int ret; | |
420 | ||
421 | memset(&request, 0, sizeof(request)); | |
422 | request.hdr.code = ICM_APPROVE_XDOMAIN; | |
423 | request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link; | |
424 | memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); | |
425 | ||
426 | request.transmit_path = xd->transmit_path; | |
427 | request.transmit_ring = xd->transmit_ring; | |
428 | request.receive_path = xd->receive_path; | |
429 | request.receive_ring = xd->receive_ring; | |
430 | ||
431 | memset(&reply, 0, sizeof(reply)); | |
432 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
433 | 1, ICM_TIMEOUT); | |
434 | if (ret) | |
435 | return ret; | |
436 | ||
437 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
438 | return -EIO; | |
439 | ||
440 | return 0; | |
441 | } | |
442 | ||
443 | static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) | |
444 | { | |
445 | u8 phy_port; | |
446 | u8 cmd; | |
447 | ||
448 | phy_port = tb_phy_port_from_link(xd->link); | |
449 | if (phy_port == 0) | |
450 | cmd = NHI_MAILBOX_DISCONNECT_PA; | |
451 | else | |
452 | cmd = NHI_MAILBOX_DISCONNECT_PB; | |
453 | ||
454 | nhi_mailbox_cmd(tb->nhi, cmd, 1); | |
455 | usleep_range(10, 50); | |
456 | nhi_mailbox_cmd(tb->nhi, cmd, 2); | |
457 | return 0; | |
458 | } | |
459 | ||
ee487dd2 | 460 | static void add_switch(struct tb_switch *parent_sw, u64 route, |
2d8ff0b5 MW |
461 | const uuid_t *uuid, const u8 *ep_name, |
462 | size_t ep_name_size, u8 connection_id, u8 connection_key, | |
ee487dd2 | 463 | u8 link, u8 depth, enum tb_security_level security_level, |
14862ee3 | 464 | bool authorized, bool boot) |
ee487dd2 | 465 | { |
2d8ff0b5 | 466 | const struct intel_vss *vss; |
ee487dd2 MW |
467 | struct tb_switch *sw; |
468 | ||
2d8ff0b5 MW |
469 | pm_runtime_get_sync(&parent_sw->dev); |
470 | ||
ee487dd2 MW |
471 | sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route); |
472 | if (!sw) | |
2d8ff0b5 | 473 | goto out; |
ee487dd2 MW |
474 | |
475 | sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); | |
476 | sw->connection_id = connection_id; | |
477 | sw->connection_key = connection_key; | |
478 | sw->link = link; | |
479 | sw->depth = depth; | |
480 | sw->authorized = authorized; | |
481 | sw->security_level = security_level; | |
14862ee3 | 482 | sw->boot = boot; |
ee487dd2 | 483 | |
2d8ff0b5 MW |
484 | vss = parse_intel_vss(ep_name, ep_name_size); |
485 | if (vss) | |
486 | sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3); | |
487 | ||
ee487dd2 MW |
488 | /* Link the two switches now */ |
489 | tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); | |
490 | tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); | |
491 | ||
492 | if (tb_switch_add(sw)) { | |
493 | tb_port_at(tb_route(sw), parent_sw)->remote = NULL; | |
494 | tb_switch_put(sw); | |
ee487dd2 | 495 | } |
2d8ff0b5 MW |
496 | |
497 | out: | |
498 | pm_runtime_mark_last_busy(&parent_sw->dev); | |
499 | pm_runtime_put_autosuspend(&parent_sw->dev); | |
ee487dd2 MW |
500 | } |
501 | ||
502 | static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, | |
503 | u64 route, u8 connection_id, u8 connection_key, | |
14862ee3 | 504 | u8 link, u8 depth, bool boot) |
ee487dd2 MW |
505 | { |
506 | /* Disconnect from parent */ | |
507 | tb_port_at(tb_route(sw), parent_sw)->remote = NULL; | |
508 | /* Re-connect via updated port*/ | |
509 | tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); | |
510 | ||
511 | /* Update with the new addressing information */ | |
512 | sw->config.route_hi = upper_32_bits(route); | |
513 | sw->config.route_lo = lower_32_bits(route); | |
514 | sw->connection_id = connection_id; | |
515 | sw->connection_key = connection_key; | |
516 | sw->link = link; | |
517 | sw->depth = depth; | |
14862ee3 | 518 | sw->boot = boot; |
ee487dd2 MW |
519 | |
520 | /* This switch still exists */ | |
521 | sw->is_unplugged = false; | |
522 | } | |
523 | ||
f67cf491 MW |
524 | static void remove_switch(struct tb_switch *sw) |
525 | { | |
526 | struct tb_switch *parent_sw; | |
527 | ||
528 | parent_sw = tb_to_switch(sw->dev.parent); | |
529 | tb_port_at(tb_route(sw), parent_sw)->remote = NULL; | |
530 | tb_switch_remove(sw); | |
531 | } | |
532 | ||
ee487dd2 MW |
533 | static void add_xdomain(struct tb_switch *sw, u64 route, |
534 | const uuid_t *local_uuid, const uuid_t *remote_uuid, | |
535 | u8 link, u8 depth) | |
536 | { | |
537 | struct tb_xdomain *xd; | |
538 | ||
2d8ff0b5 MW |
539 | pm_runtime_get_sync(&sw->dev); |
540 | ||
ee487dd2 MW |
541 | xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid); |
542 | if (!xd) | |
2d8ff0b5 | 543 | goto out; |
ee487dd2 MW |
544 | |
545 | xd->link = link; | |
546 | xd->depth = depth; | |
547 | ||
548 | tb_port_at(route, sw)->xdomain = xd; | |
549 | ||
550 | tb_xdomain_add(xd); | |
2d8ff0b5 MW |
551 | |
552 | out: | |
553 | pm_runtime_mark_last_busy(&sw->dev); | |
554 | pm_runtime_put_autosuspend(&sw->dev); | |
ee487dd2 MW |
555 | } |
556 | ||
557 | static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link) | |
558 | { | |
559 | xd->link = link; | |
560 | xd->route = route; | |
561 | xd->is_unplugged = false; | |
562 | } | |
563 | ||
79fae987 MW |
564 | static void remove_xdomain(struct tb_xdomain *xd) |
565 | { | |
566 | struct tb_switch *sw; | |
567 | ||
568 | sw = tb_to_switch(xd->dev.parent); | |
569 | tb_port_at(xd->route, sw)->xdomain = NULL; | |
570 | tb_xdomain_remove(xd); | |
571 | } | |
572 | ||
f67cf491 MW |
573 | static void |
574 | icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |
575 | { | |
576 | const struct icm_fr_event_device_connected *pkg = | |
577 | (const struct icm_fr_event_device_connected *)hdr; | |
ee487dd2 | 578 | enum tb_security_level security_level; |
f67cf491 MW |
579 | struct tb_switch *sw, *parent_sw; |
580 | struct icm *icm = tb_priv(tb); | |
581 | bool authorized = false; | |
79fae987 | 582 | struct tb_xdomain *xd; |
f67cf491 | 583 | u8 link, depth; |
14862ee3 | 584 | bool boot; |
f67cf491 MW |
585 | u64 route; |
586 | int ret; | |
587 | ||
588 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; | |
589 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> | |
590 | ICM_LINK_INFO_DEPTH_SHIFT; | |
591 | authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; | |
ee487dd2 MW |
592 | security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> |
593 | ICM_FLAGS_SLEVEL_SHIFT; | |
14862ee3 | 594 | boot = pkg->link_info & ICM_LINK_INFO_BOOT; |
f67cf491 | 595 | |
cb653eec MW |
596 | if (pkg->link_info & ICM_LINK_INFO_REJECTED) { |
597 | tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n", | |
598 | link, depth); | |
599 | return; | |
600 | } | |
601 | ||
f67cf491 MW |
602 | sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); |
603 | if (sw) { | |
604 | u8 phy_port, sw_phy_port; | |
605 | ||
606 | parent_sw = tb_to_switch(sw->dev.parent); | |
fdd92e89 MW |
607 | sw_phy_port = tb_phy_port_from_link(sw->link); |
608 | phy_port = tb_phy_port_from_link(link); | |
f67cf491 MW |
609 | |
610 | /* | |
611 | * On resume ICM will send us connected events for the | |
612 | * devices that still are present. However, that | |
613 | * information might have changed for example by the | |
614 | * fact that a switch on a dual-link connection might | |
615 | * have been enumerated using the other link now. Make | |
616 | * sure our book keeping matches that. | |
617 | */ | |
618 | if (sw->depth == depth && sw_phy_port == phy_port && | |
619 | !!sw->authorized == authorized) { | |
fdd92e89 MW |
620 | /* |
621 | * It was enumerated through another link so update | |
622 | * route string accordingly. | |
623 | */ | |
624 | if (sw->link != link) { | |
625 | ret = icm->get_route(tb, link, depth, &route); | |
626 | if (ret) { | |
627 | tb_err(tb, "failed to update route string for switch at %u.%u\n", | |
628 | link, depth); | |
629 | tb_switch_put(sw); | |
630 | return; | |
631 | } | |
632 | } else { | |
633 | route = tb_route(sw); | |
634 | } | |
635 | ||
ee487dd2 | 636 | update_switch(parent_sw, sw, route, pkg->connection_id, |
14862ee3 | 637 | pkg->connection_key, link, depth, boot); |
f67cf491 MW |
638 | tb_switch_put(sw); |
639 | return; | |
640 | } | |
641 | ||
642 | /* | |
643 | * User connected the same switch to another physical | |
644 | * port or to another part of the topology. Remove the | |
645 | * existing switch now before adding the new one. | |
646 | */ | |
647 | remove_switch(sw); | |
648 | tb_switch_put(sw); | |
649 | } | |
650 | ||
651 | /* | |
652 | * If the switch was not found by UUID, look for a switch on | |
653 | * same physical port (taking possible link aggregation into | |
654 | * account) and depth. If we found one it is definitely a stale | |
655 | * one so remove it first. | |
656 | */ | |
657 | sw = tb_switch_find_by_link_depth(tb, link, depth); | |
658 | if (!sw) { | |
659 | u8 dual_link; | |
660 | ||
661 | dual_link = dual_link_from_link(link); | |
662 | if (dual_link) | |
663 | sw = tb_switch_find_by_link_depth(tb, dual_link, depth); | |
664 | } | |
665 | if (sw) { | |
666 | remove_switch(sw); | |
667 | tb_switch_put(sw); | |
668 | } | |
669 | ||
79fae987 MW |
670 | /* Remove existing XDomain connection if found */ |
671 | xd = tb_xdomain_find_by_link_depth(tb, link, depth); | |
672 | if (xd) { | |
673 | remove_xdomain(xd); | |
674 | tb_xdomain_put(xd); | |
675 | } | |
676 | ||
f67cf491 MW |
677 | parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1); |
678 | if (!parent_sw) { | |
679 | tb_err(tb, "failed to find parent switch for %u.%u\n", | |
680 | link, depth); | |
681 | return; | |
682 | } | |
683 | ||
fdd92e89 MW |
684 | ret = icm->get_route(tb, link, depth, &route); |
685 | if (ret) { | |
686 | tb_err(tb, "failed to find route string for switch at %u.%u\n", | |
687 | link, depth); | |
688 | tb_switch_put(parent_sw); | |
689 | return; | |
690 | } | |
691 | ||
2d8ff0b5 MW |
692 | add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, |
693 | sizeof(pkg->ep_name), pkg->connection_id, | |
ee487dd2 | 694 | pkg->connection_key, link, depth, security_level, |
14862ee3 | 695 | authorized, boot); |
f67cf491 | 696 | |
f67cf491 MW |
697 | tb_switch_put(parent_sw); |
698 | } | |
699 | ||
700 | static void | |
701 | icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) | |
702 | { | |
703 | const struct icm_fr_event_device_disconnected *pkg = | |
704 | (const struct icm_fr_event_device_disconnected *)hdr; | |
705 | struct tb_switch *sw; | |
706 | u8 link, depth; | |
707 | ||
708 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; | |
709 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> | |
710 | ICM_LINK_INFO_DEPTH_SHIFT; | |
711 | ||
712 | if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) { | |
713 | tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); | |
714 | return; | |
715 | } | |
716 | ||
717 | sw = tb_switch_find_by_link_depth(tb, link, depth); | |
718 | if (!sw) { | |
719 | tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, | |
720 | depth); | |
721 | return; | |
722 | } | |
723 | ||
724 | remove_switch(sw); | |
725 | tb_switch_put(sw); | |
726 | } | |
727 | ||
d1ff7024 MW |
728 | static void |
729 | icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |
730 | { | |
731 | const struct icm_fr_event_xdomain_connected *pkg = | |
732 | (const struct icm_fr_event_xdomain_connected *)hdr; | |
733 | struct tb_xdomain *xd; | |
734 | struct tb_switch *sw; | |
735 | u8 link, depth; | |
d1ff7024 MW |
736 | u64 route; |
737 | ||
d1ff7024 MW |
738 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; |
739 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> | |
740 | ICM_LINK_INFO_DEPTH_SHIFT; | |
d1ff7024 MW |
741 | |
742 | if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) { | |
743 | tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); | |
744 | return; | |
745 | } | |
746 | ||
747 | route = get_route(pkg->local_route_hi, pkg->local_route_lo); | |
748 | ||
749 | xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); | |
750 | if (xd) { | |
751 | u8 xd_phy_port, phy_port; | |
752 | ||
753 | xd_phy_port = phy_port_from_route(xd->route, xd->depth); | |
754 | phy_port = phy_port_from_route(route, depth); | |
755 | ||
756 | if (xd->depth == depth && xd_phy_port == phy_port) { | |
ee487dd2 | 757 | update_xdomain(xd, route, link); |
d1ff7024 MW |
758 | tb_xdomain_put(xd); |
759 | return; | |
760 | } | |
761 | ||
762 | /* | |
763 | * If we find an existing XDomain connection remove it | |
764 | * now. We need to go through login handshake and | |
765 | * everything anyway to be able to re-establish the | |
766 | * connection. | |
767 | */ | |
768 | remove_xdomain(xd); | |
769 | tb_xdomain_put(xd); | |
770 | } | |
771 | ||
772 | /* | |
773 | * Look if there already exists an XDomain in the same place | |
774 | * than the new one and in that case remove it because it is | |
775 | * most likely another host that got disconnected. | |
776 | */ | |
777 | xd = tb_xdomain_find_by_link_depth(tb, link, depth); | |
778 | if (!xd) { | |
779 | u8 dual_link; | |
780 | ||
781 | dual_link = dual_link_from_link(link); | |
782 | if (dual_link) | |
783 | xd = tb_xdomain_find_by_link_depth(tb, dual_link, | |
784 | depth); | |
785 | } | |
786 | if (xd) { | |
787 | remove_xdomain(xd); | |
788 | tb_xdomain_put(xd); | |
789 | } | |
790 | ||
791 | /* | |
792 | * If the user disconnected a switch during suspend and | |
793 | * connected another host to the same port, remove the switch | |
794 | * first. | |
795 | */ | |
796 | sw = get_switch_at_route(tb->root_switch, route); | |
797 | if (sw) | |
798 | remove_switch(sw); | |
799 | ||
800 | sw = tb_switch_find_by_link_depth(tb, link, depth); | |
801 | if (!sw) { | |
802 | tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, | |
803 | depth); | |
804 | return; | |
805 | } | |
806 | ||
ee487dd2 MW |
807 | add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link, |
808 | depth); | |
d1ff7024 MW |
809 | tb_switch_put(sw); |
810 | } | |
811 | ||
812 | static void | |
813 | icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) | |
814 | { | |
815 | const struct icm_fr_event_xdomain_disconnected *pkg = | |
816 | (const struct icm_fr_event_xdomain_disconnected *)hdr; | |
817 | struct tb_xdomain *xd; | |
818 | ||
819 | /* | |
820 | * If the connection is through one or multiple devices, the | |
821 | * XDomain device is removed along with them so it is fine if we | |
822 | * cannot find it here. | |
823 | */ | |
824 | xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); | |
825 | if (xd) { | |
826 | remove_xdomain(xd); | |
827 | tb_xdomain_put(xd); | |
828 | } | |
829 | } | |
830 | ||
4bac471d RM |
831 | static int |
832 | icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, | |
2d8ff0b5 | 833 | size_t *nboot_acl, bool *rpm) |
4bac471d RM |
834 | { |
835 | struct icm_tr_pkg_driver_ready_response reply; | |
836 | struct icm_pkg_driver_ready request = { | |
837 | .hdr.code = ICM_DRIVER_READY, | |
838 | }; | |
839 | int ret; | |
840 | ||
841 | memset(&reply, 0, sizeof(reply)); | |
842 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
843 | 1, 20000); | |
844 | if (ret) | |
845 | return ret; | |
846 | ||
847 | if (security_level) | |
848 | *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK; | |
849 | if (nboot_acl) | |
850 | *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >> | |
851 | ICM_TR_INFO_BOOT_ACL_SHIFT; | |
2d8ff0b5 MW |
852 | if (rpm) |
853 | *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3); | |
854 | ||
4bac471d RM |
855 | return 0; |
856 | } | |
857 | ||
858 | static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw) | |
859 | { | |
860 | struct icm_tr_pkg_approve_device request; | |
861 | struct icm_tr_pkg_approve_device reply; | |
862 | int ret; | |
863 | ||
864 | memset(&request, 0, sizeof(request)); | |
865 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
866 | request.hdr.code = ICM_APPROVE_DEVICE; | |
867 | request.route_lo = sw->config.route_lo; | |
868 | request.route_hi = sw->config.route_hi; | |
869 | request.connection_id = sw->connection_id; | |
870 | ||
871 | memset(&reply, 0, sizeof(reply)); | |
872 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
873 | 1, ICM_APPROVE_TIMEOUT); | |
874 | if (ret) | |
875 | return ret; | |
876 | ||
877 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { | |
878 | tb_warn(tb, "PCIe tunnel creation failed\n"); | |
879 | return -EIO; | |
880 | } | |
881 | ||
882 | return 0; | |
883 | } | |
884 | ||
885 | static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw) | |
886 | { | |
887 | struct icm_tr_pkg_add_device_key_response reply; | |
888 | struct icm_tr_pkg_add_device_key request; | |
889 | int ret; | |
890 | ||
891 | memset(&request, 0, sizeof(request)); | |
892 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
893 | request.hdr.code = ICM_ADD_DEVICE_KEY; | |
894 | request.route_lo = sw->config.route_lo; | |
895 | request.route_hi = sw->config.route_hi; | |
896 | request.connection_id = sw->connection_id; | |
897 | memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); | |
898 | ||
899 | memset(&reply, 0, sizeof(reply)); | |
900 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
901 | 1, ICM_TIMEOUT); | |
902 | if (ret) | |
903 | return ret; | |
904 | ||
905 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { | |
906 | tb_warn(tb, "Adding key to switch failed\n"); | |
907 | return -EIO; | |
908 | } | |
909 | ||
910 | return 0; | |
911 | } | |
912 | ||
913 | static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, | |
914 | const u8 *challenge, u8 *response) | |
915 | { | |
916 | struct icm_tr_pkg_challenge_device_response reply; | |
917 | struct icm_tr_pkg_challenge_device request; | |
918 | int ret; | |
919 | ||
920 | memset(&request, 0, sizeof(request)); | |
921 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
922 | request.hdr.code = ICM_CHALLENGE_DEVICE; | |
923 | request.route_lo = sw->config.route_lo; | |
924 | request.route_hi = sw->config.route_hi; | |
925 | request.connection_id = sw->connection_id; | |
926 | memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); | |
927 | ||
928 | memset(&reply, 0, sizeof(reply)); | |
929 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
930 | 1, ICM_TIMEOUT); | |
931 | if (ret) | |
932 | return ret; | |
933 | ||
934 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
935 | return -EKEYREJECTED; | |
936 | if (reply.hdr.flags & ICM_FLAGS_NO_KEY) | |
937 | return -ENOKEY; | |
938 | ||
939 | memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); | |
940 | ||
941 | return 0; | |
942 | } | |
943 | ||
944 | static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) | |
945 | { | |
946 | struct icm_tr_pkg_approve_xdomain_response reply; | |
947 | struct icm_tr_pkg_approve_xdomain request; | |
948 | int ret; | |
949 | ||
950 | memset(&request, 0, sizeof(request)); | |
951 | request.hdr.code = ICM_APPROVE_XDOMAIN; | |
952 | request.route_hi = upper_32_bits(xd->route); | |
953 | request.route_lo = lower_32_bits(xd->route); | |
954 | request.transmit_path = xd->transmit_path; | |
955 | request.transmit_ring = xd->transmit_ring; | |
956 | request.receive_path = xd->receive_path; | |
957 | request.receive_ring = xd->receive_ring; | |
958 | memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); | |
959 | ||
960 | memset(&reply, 0, sizeof(reply)); | |
961 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
962 | 1, ICM_TIMEOUT); | |
963 | if (ret) | |
964 | return ret; | |
965 | ||
966 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
967 | return -EIO; | |
968 | ||
969 | return 0; | |
970 | } | |
971 | ||
972 | static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd, | |
973 | int stage) | |
974 | { | |
975 | struct icm_tr_pkg_disconnect_xdomain_response reply; | |
976 | struct icm_tr_pkg_disconnect_xdomain request; | |
977 | int ret; | |
978 | ||
979 | memset(&request, 0, sizeof(request)); | |
980 | request.hdr.code = ICM_DISCONNECT_XDOMAIN; | |
981 | request.stage = stage; | |
982 | request.route_hi = upper_32_bits(xd->route); | |
983 | request.route_lo = lower_32_bits(xd->route); | |
984 | memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); | |
985 | ||
986 | memset(&reply, 0, sizeof(reply)); | |
987 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
988 | 1, ICM_TIMEOUT); | |
989 | if (ret) | |
990 | return ret; | |
991 | ||
992 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
993 | return -EIO; | |
994 | ||
995 | return 0; | |
996 | } | |
997 | ||
998 | static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) | |
999 | { | |
1000 | int ret; | |
1001 | ||
1002 | ret = icm_tr_xdomain_tear_down(tb, xd, 1); | |
1003 | if (ret) | |
1004 | return ret; | |
1005 | ||
1006 | usleep_range(10, 50); | |
1007 | return icm_tr_xdomain_tear_down(tb, xd, 2); | |
1008 | } | |
1009 | ||
1010 | static void | |
1011 | icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |
1012 | { | |
1013 | const struct icm_tr_event_device_connected *pkg = | |
1014 | (const struct icm_tr_event_device_connected *)hdr; | |
1015 | enum tb_security_level security_level; | |
1016 | struct tb_switch *sw, *parent_sw; | |
1017 | struct tb_xdomain *xd; | |
1018 | bool authorized, boot; | |
1019 | u64 route; | |
1020 | ||
1021 | /* | |
1022 | * Currently we don't use the QoS information coming with the | |
1023 | * device connected message so simply just ignore that extra | |
1024 | * packet for now. | |
1025 | */ | |
1026 | if (pkg->hdr.packet_id) | |
1027 | return; | |
1028 | ||
4bac471d RM |
1029 | route = get_route(pkg->route_hi, pkg->route_lo); |
1030 | authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; | |
1031 | security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> | |
1032 | ICM_FLAGS_SLEVEL_SHIFT; | |
1033 | boot = pkg->link_info & ICM_LINK_INFO_BOOT; | |
1034 | ||
1035 | if (pkg->link_info & ICM_LINK_INFO_REJECTED) { | |
1036 | tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n", | |
1037 | route); | |
1038 | return; | |
1039 | } | |
1040 | ||
1041 | sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); | |
1042 | if (sw) { | |
1043 | /* Update the switch if it is still in the same place */ | |
1044 | if (tb_route(sw) == route && !!sw->authorized == authorized) { | |
1045 | parent_sw = tb_to_switch(sw->dev.parent); | |
1046 | update_switch(parent_sw, sw, route, pkg->connection_id, | |
1047 | 0, 0, 0, boot); | |
1048 | tb_switch_put(sw); | |
1049 | return; | |
1050 | } | |
1051 | ||
1052 | remove_switch(sw); | |
1053 | tb_switch_put(sw); | |
1054 | } | |
1055 | ||
1056 | /* Another switch with the same address */ | |
1057 | sw = tb_switch_find_by_route(tb, route); | |
1058 | if (sw) { | |
1059 | remove_switch(sw); | |
1060 | tb_switch_put(sw); | |
1061 | } | |
1062 | ||
1063 | /* XDomain connection with the same address */ | |
1064 | xd = tb_xdomain_find_by_route(tb, route); | |
1065 | if (xd) { | |
1066 | remove_xdomain(xd); | |
1067 | tb_xdomain_put(xd); | |
1068 | } | |
1069 | ||
1070 | parent_sw = tb_switch_find_by_route(tb, get_parent_route(route)); | |
1071 | if (!parent_sw) { | |
1072 | tb_err(tb, "failed to find parent switch for %llx\n", route); | |
1073 | return; | |
1074 | } | |
1075 | ||
2d8ff0b5 MW |
1076 | add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, |
1077 | sizeof(pkg->ep_name), pkg->connection_id, | |
4bac471d RM |
1078 | 0, 0, 0, security_level, authorized, boot); |
1079 | ||
1080 | tb_switch_put(parent_sw); | |
1081 | } | |
1082 | ||
1083 | static void | |
1084 | icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) | |
1085 | { | |
1086 | const struct icm_tr_event_device_disconnected *pkg = | |
1087 | (const struct icm_tr_event_device_disconnected *)hdr; | |
1088 | struct tb_switch *sw; | |
1089 | u64 route; | |
1090 | ||
1091 | route = get_route(pkg->route_hi, pkg->route_lo); | |
1092 | ||
1093 | sw = tb_switch_find_by_route(tb, route); | |
1094 | if (!sw) { | |
1095 | tb_warn(tb, "no switch exists at %llx, ignoring\n", route); | |
1096 | return; | |
1097 | } | |
1098 | ||
1099 | remove_switch(sw); | |
1100 | tb_switch_put(sw); | |
1101 | } | |
1102 | ||
1103 | static void | |
1104 | icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |
1105 | { | |
1106 | const struct icm_tr_event_xdomain_connected *pkg = | |
1107 | (const struct icm_tr_event_xdomain_connected *)hdr; | |
1108 | struct tb_xdomain *xd; | |
1109 | struct tb_switch *sw; | |
1110 | u64 route; | |
1111 | ||
1112 | if (!tb->root_switch) | |
1113 | return; | |
1114 | ||
1115 | route = get_route(pkg->local_route_hi, pkg->local_route_lo); | |
1116 | ||
1117 | xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); | |
1118 | if (xd) { | |
1119 | if (xd->route == route) { | |
1120 | update_xdomain(xd, route, 0); | |
1121 | tb_xdomain_put(xd); | |
1122 | return; | |
1123 | } | |
1124 | ||
1125 | remove_xdomain(xd); | |
1126 | tb_xdomain_put(xd); | |
1127 | } | |
1128 | ||
1129 | /* An existing xdomain with the same address */ | |
1130 | xd = tb_xdomain_find_by_route(tb, route); | |
1131 | if (xd) { | |
1132 | remove_xdomain(xd); | |
1133 | tb_xdomain_put(xd); | |
1134 | } | |
1135 | ||
1136 | /* | |
1137 | * If the user disconnected a switch during suspend and | |
1138 | * connected another host to the same port, remove the switch | |
1139 | * first. | |
1140 | */ | |
1141 | sw = get_switch_at_route(tb->root_switch, route); | |
1142 | if (sw) | |
1143 | remove_switch(sw); | |
1144 | ||
1145 | sw = tb_switch_find_by_route(tb, get_parent_route(route)); | |
1146 | if (!sw) { | |
1147 | tb_warn(tb, "no switch exists at %llx, ignoring\n", route); | |
1148 | return; | |
1149 | } | |
1150 | ||
1151 | add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0); | |
1152 | tb_switch_put(sw); | |
1153 | } | |
1154 | ||
1155 | static void | |
1156 | icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) | |
1157 | { | |
1158 | const struct icm_tr_event_xdomain_disconnected *pkg = | |
1159 | (const struct icm_tr_event_xdomain_disconnected *)hdr; | |
1160 | struct tb_xdomain *xd; | |
1161 | u64 route; | |
1162 | ||
1163 | route = get_route(pkg->route_hi, pkg->route_lo); | |
1164 | ||
1165 | xd = tb_xdomain_find_by_route(tb, route); | |
1166 | if (xd) { | |
1167 | remove_xdomain(xd); | |
1168 | tb_xdomain_put(xd); | |
1169 | } | |
1170 | } | |
1171 | ||
f67cf491 MW |
1172 | static struct pci_dev *get_upstream_port(struct pci_dev *pdev) |
1173 | { | |
1174 | struct pci_dev *parent; | |
1175 | ||
1176 | parent = pci_upstream_bridge(pdev); | |
1177 | while (parent) { | |
1178 | if (!pci_is_pcie(parent)) | |
1179 | return NULL; | |
1180 | if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) | |
1181 | break; | |
1182 | parent = pci_upstream_bridge(parent); | |
1183 | } | |
1184 | ||
1185 | if (!parent) | |
1186 | return NULL; | |
1187 | ||
1188 | switch (parent->device) { | |
1189 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: | |
1190 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: | |
1191 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: | |
1192 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: | |
1193 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: | |
1194 | return parent; | |
1195 | } | |
1196 | ||
1197 | return NULL; | |
1198 | } | |
1199 | ||
1200 | static bool icm_ar_is_supported(struct tb *tb) | |
1201 | { | |
1202 | struct pci_dev *upstream_port; | |
1203 | struct icm *icm = tb_priv(tb); | |
1204 | ||
1205 | /* | |
1206 | * Starting from Alpine Ridge we can use ICM on Apple machines | |
1207 | * as well. We just need to reset and re-enable it first. | |
1208 | */ | |
630b3aff | 1209 | if (!x86_apple_machine) |
f67cf491 MW |
1210 | return true; |
1211 | ||
1212 | /* | |
1213 | * Find the upstream PCIe port in case we need to do reset | |
1214 | * through its vendor specific registers. | |
1215 | */ | |
1216 | upstream_port = get_upstream_port(tb->nhi->pdev); | |
1217 | if (upstream_port) { | |
1218 | int cap; | |
1219 | ||
1220 | cap = pci_find_ext_capability(upstream_port, | |
1221 | PCI_EXT_CAP_ID_VNDR); | |
1222 | if (cap > 0) { | |
1223 | icm->upstream_port = upstream_port; | |
1224 | icm->vnd_cap = cap; | |
1225 | ||
1226 | return true; | |
1227 | } | |
1228 | } | |
1229 | ||
1230 | return false; | |
1231 | } | |
1232 | ||
1233 | static int icm_ar_get_mode(struct tb *tb) | |
1234 | { | |
1235 | struct tb_nhi *nhi = tb->nhi; | |
e4be8c9b | 1236 | int retries = 60; |
f67cf491 MW |
1237 | u32 val; |
1238 | ||
1239 | do { | |
1240 | val = ioread32(nhi->iobase + REG_FW_STS); | |
1241 | if (val & REG_FW_STS_NVM_AUTH_DONE) | |
1242 | break; | |
e4be8c9b | 1243 | msleep(50); |
f67cf491 MW |
1244 | } while (--retries); |
1245 | ||
1246 | if (!retries) { | |
1247 | dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n"); | |
1248 | return -ENODEV; | |
1249 | } | |
1250 | ||
1251 | return nhi_mailbox_mode(nhi); | |
1252 | } | |
1253 | ||
9aaa3b8b MW |
1254 | static int |
1255 | icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level, | |
2d8ff0b5 | 1256 | size_t *nboot_acl, bool *rpm) |
9aaa3b8b MW |
1257 | { |
1258 | struct icm_ar_pkg_driver_ready_response reply; | |
1259 | struct icm_pkg_driver_ready request = { | |
1260 | .hdr.code = ICM_DRIVER_READY, | |
1261 | }; | |
1262 | int ret; | |
1263 | ||
1264 | memset(&reply, 0, sizeof(reply)); | |
1265 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1266 | 1, ICM_TIMEOUT); | |
1267 | if (ret) | |
1268 | return ret; | |
1269 | ||
1270 | if (security_level) | |
1271 | *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK; | |
1272 | if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED)) | |
1273 | *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >> | |
1274 | ICM_AR_INFO_BOOT_ACL_SHIFT; | |
2d8ff0b5 MW |
1275 | if (rpm) |
1276 | *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3); | |
1277 | ||
9aaa3b8b MW |
1278 | return 0; |
1279 | } | |
1280 | ||
f67cf491 MW |
1281 | static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) |
1282 | { | |
1283 | struct icm_ar_pkg_get_route_response reply; | |
1284 | struct icm_ar_pkg_get_route request = { | |
1285 | .hdr = { .code = ICM_GET_ROUTE }, | |
1286 | .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link, | |
1287 | }; | |
1288 | int ret; | |
1289 | ||
1290 | memset(&reply, 0, sizeof(reply)); | |
1291 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1292 | 1, ICM_TIMEOUT); | |
1293 | if (ret) | |
1294 | return ret; | |
1295 | ||
1296 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
1297 | return -EIO; | |
1298 | ||
1299 | *route = get_route(reply.route_hi, reply.route_lo); | |
1300 | return 0; | |
1301 | } | |
1302 | ||
9aaa3b8b MW |
1303 | static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids) |
1304 | { | |
1305 | struct icm_ar_pkg_preboot_acl_response reply; | |
1306 | struct icm_ar_pkg_preboot_acl request = { | |
1307 | .hdr = { .code = ICM_PREBOOT_ACL }, | |
1308 | }; | |
1309 | int ret, i; | |
1310 | ||
1311 | memset(&reply, 0, sizeof(reply)); | |
1312 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1313 | 1, ICM_TIMEOUT); | |
1314 | if (ret) | |
1315 | return ret; | |
1316 | ||
1317 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
1318 | return -EIO; | |
1319 | ||
1320 | for (i = 0; i < nuuids; i++) { | |
1321 | u32 *uuid = (u32 *)&uuids[i]; | |
1322 | ||
1323 | uuid[0] = reply.acl[i].uuid_lo; | |
1324 | uuid[1] = reply.acl[i].uuid_hi; | |
1325 | ||
1326 | if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) { | |
1327 | /* Map empty entries to null UUID */ | |
1328 | uuid[0] = 0; | |
1329 | uuid[1] = 0; | |
dd010bd7 | 1330 | } else if (uuid[0] != 0 || uuid[1] != 0) { |
9aaa3b8b MW |
1331 | /* Upper two DWs are always one's */ |
1332 | uuid[2] = 0xffffffff; | |
1333 | uuid[3] = 0xffffffff; | |
1334 | } | |
1335 | } | |
1336 | ||
1337 | return ret; | |
1338 | } | |
1339 | ||
1340 | static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids, | |
1341 | size_t nuuids) | |
1342 | { | |
1343 | struct icm_ar_pkg_preboot_acl_response reply; | |
1344 | struct icm_ar_pkg_preboot_acl request = { | |
1345 | .hdr = { | |
1346 | .code = ICM_PREBOOT_ACL, | |
1347 | .flags = ICM_FLAGS_WRITE, | |
1348 | }, | |
1349 | }; | |
1350 | int ret, i; | |
1351 | ||
1352 | for (i = 0; i < nuuids; i++) { | |
1353 | const u32 *uuid = (const u32 *)&uuids[i]; | |
1354 | ||
1355 | if (uuid_is_null(&uuids[i])) { | |
1356 | /* | |
1357 | * Map null UUID to the empty (all one) entries | |
1358 | * for ICM. | |
1359 | */ | |
1360 | request.acl[i].uuid_lo = 0xffffffff; | |
1361 | request.acl[i].uuid_hi = 0xffffffff; | |
1362 | } else { | |
1363 | /* Two high DWs need to be set to all one */ | |
1364 | if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff) | |
1365 | return -EINVAL; | |
1366 | ||
1367 | request.acl[i].uuid_lo = uuid[0]; | |
1368 | request.acl[i].uuid_hi = uuid[1]; | |
1369 | } | |
1370 | } | |
1371 | ||
1372 | memset(&reply, 0, sizeof(reply)); | |
1373 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1374 | 1, ICM_TIMEOUT); | |
1375 | if (ret) | |
1376 | return ret; | |
1377 | ||
1378 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
1379 | return -EIO; | |
1380 | ||
1381 | return 0; | |
1382 | } | |
1383 | ||
f67cf491 MW |
1384 | static void icm_handle_notification(struct work_struct *work) |
1385 | { | |
1386 | struct icm_notification *n = container_of(work, typeof(*n), work); | |
1387 | struct tb *tb = n->tb; | |
1388 | struct icm *icm = tb_priv(tb); | |
1389 | ||
1390 | mutex_lock(&tb->lock); | |
1391 | ||
86da809d MW |
1392 | /* |
1393 | * When the domain is stopped we flush its workqueue but before | |
1394 | * that the root switch is removed. In that case we should treat | |
1395 | * the queued events as being canceled. | |
1396 | */ | |
1397 | if (tb->root_switch) { | |
1398 | switch (n->pkg->code) { | |
1399 | case ICM_EVENT_DEVICE_CONNECTED: | |
1400 | icm->device_connected(tb, n->pkg); | |
1401 | break; | |
1402 | case ICM_EVENT_DEVICE_DISCONNECTED: | |
1403 | icm->device_disconnected(tb, n->pkg); | |
1404 | break; | |
1405 | case ICM_EVENT_XDOMAIN_CONNECTED: | |
1406 | icm->xdomain_connected(tb, n->pkg); | |
1407 | break; | |
1408 | case ICM_EVENT_XDOMAIN_DISCONNECTED: | |
1409 | icm->xdomain_disconnected(tb, n->pkg); | |
1410 | break; | |
1411 | } | |
f67cf491 MW |
1412 | } |
1413 | ||
1414 | mutex_unlock(&tb->lock); | |
1415 | ||
1416 | kfree(n->pkg); | |
1417 | kfree(n); | |
1418 | } | |
1419 | ||
1420 | static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, | |
1421 | const void *buf, size_t size) | |
1422 | { | |
1423 | struct icm_notification *n; | |
1424 | ||
1425 | n = kmalloc(sizeof(*n), GFP_KERNEL); | |
1426 | if (!n) | |
1427 | return; | |
1428 | ||
1429 | INIT_WORK(&n->work, icm_handle_notification); | |
1430 | n->pkg = kmemdup(buf, size, GFP_KERNEL); | |
1431 | n->tb = tb; | |
1432 | ||
1433 | queue_work(tb->wq, &n->work); | |
1434 | } | |
1435 | ||
1436 | static int | |
9aaa3b8b | 1437 | __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level, |
2d8ff0b5 | 1438 | size_t *nboot_acl, bool *rpm) |
f67cf491 | 1439 | { |
3080e197 | 1440 | struct icm *icm = tb_priv(tb); |
44b51bbb | 1441 | unsigned int retries = 50; |
f67cf491 MW |
1442 | int ret; |
1443 | ||
2d8ff0b5 | 1444 | ret = icm->driver_ready(tb, security_level, nboot_acl, rpm); |
3080e197 MW |
1445 | if (ret) { |
1446 | tb_err(tb, "failed to send driver ready to ICM\n"); | |
f67cf491 | 1447 | return ret; |
3080e197 | 1448 | } |
f67cf491 MW |
1449 | |
1450 | /* | |
1451 | * Hold on here until the switch config space is accessible so | |
1452 | * that we can read root switch config successfully. | |
1453 | */ | |
1454 | do { | |
1455 | struct tb_cfg_result res; | |
1456 | u32 tmp; | |
1457 | ||
1458 | res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH, | |
1459 | 0, 1, 100); | |
1460 | if (!res.err) | |
1461 | return 0; | |
1462 | ||
1463 | msleep(50); | |
1464 | } while (--retries); | |
1465 | ||
44b51bbb | 1466 | tb_err(tb, "failed to read root switch config space, giving up\n"); |
f67cf491 MW |
1467 | return -ETIMEDOUT; |
1468 | } | |
1469 | ||
1470 | static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) | |
1471 | { | |
1472 | unsigned long end = jiffies + msecs_to_jiffies(timeout_msec); | |
1473 | u32 cmd; | |
1474 | ||
1475 | do { | |
1476 | pci_read_config_dword(icm->upstream_port, | |
1477 | icm->vnd_cap + PCIE2CIO_CMD, &cmd); | |
1478 | if (!(cmd & PCIE2CIO_CMD_START)) { | |
1479 | if (cmd & PCIE2CIO_CMD_TIMEOUT) | |
1480 | break; | |
1481 | return 0; | |
1482 | } | |
1483 | ||
1484 | msleep(50); | |
1485 | } while (time_before(jiffies, end)); | |
1486 | ||
1487 | return -ETIMEDOUT; | |
1488 | } | |
1489 | ||
1490 | static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, | |
1491 | unsigned int port, unsigned int index, u32 *data) | |
1492 | { | |
1493 | struct pci_dev *pdev = icm->upstream_port; | |
1494 | int ret, vnd_cap = icm->vnd_cap; | |
1495 | u32 cmd; | |
1496 | ||
1497 | cmd = index; | |
1498 | cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; | |
1499 | cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; | |
1500 | cmd |= PCIE2CIO_CMD_START; | |
1501 | pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); | |
1502 | ||
1503 | ret = pci2cio_wait_completion(icm, 5000); | |
1504 | if (ret) | |
1505 | return ret; | |
1506 | ||
1507 | pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data); | |
1508 | return 0; | |
1509 | } | |
1510 | ||
1511 | static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, | |
1512 | unsigned int port, unsigned int index, u32 data) | |
1513 | { | |
1514 | struct pci_dev *pdev = icm->upstream_port; | |
1515 | int vnd_cap = icm->vnd_cap; | |
1516 | u32 cmd; | |
1517 | ||
1518 | pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data); | |
1519 | ||
1520 | cmd = index; | |
1521 | cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; | |
1522 | cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; | |
1523 | cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START; | |
1524 | pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); | |
1525 | ||
1526 | return pci2cio_wait_completion(icm, 5000); | |
1527 | } | |
1528 | ||
1529 | static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) | |
1530 | { | |
1531 | struct icm *icm = tb_priv(tb); | |
1532 | u32 val; | |
1533 | ||
ea9d7bb7 MW |
1534 | if (!icm->upstream_port) |
1535 | return -ENODEV; | |
1536 | ||
f67cf491 MW |
1537 | /* Put ARC to wait for CIO reset event to happen */ |
1538 | val = ioread32(nhi->iobase + REG_FW_STS); | |
1539 | val |= REG_FW_STS_CIO_RESET_REQ; | |
1540 | iowrite32(val, nhi->iobase + REG_FW_STS); | |
1541 | ||
1542 | /* Re-start ARC */ | |
1543 | val = ioread32(nhi->iobase + REG_FW_STS); | |
1544 | val |= REG_FW_STS_ICM_EN_INVERT; | |
1545 | val |= REG_FW_STS_ICM_EN_CPU; | |
1546 | iowrite32(val, nhi->iobase + REG_FW_STS); | |
1547 | ||
1548 | /* Trigger CIO reset now */ | |
1549 | return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9)); | |
1550 | } | |
1551 | ||
1552 | static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi) | |
1553 | { | |
1554 | unsigned int retries = 10; | |
1555 | int ret; | |
1556 | u32 val; | |
1557 | ||
1558 | /* Check if the ICM firmware is already running */ | |
1559 | val = ioread32(nhi->iobase + REG_FW_STS); | |
1560 | if (val & REG_FW_STS_ICM_EN) | |
1561 | return 0; | |
1562 | ||
1563 | dev_info(&nhi->pdev->dev, "starting ICM firmware\n"); | |
1564 | ||
1565 | ret = icm_firmware_reset(tb, nhi); | |
1566 | if (ret) | |
1567 | return ret; | |
1568 | ||
1569 | /* Wait until the ICM firmware tells us it is up and running */ | |
1570 | do { | |
1571 | /* Check that the ICM firmware is running */ | |
1572 | val = ioread32(nhi->iobase + REG_FW_STS); | |
1573 | if (val & REG_FW_STS_NVM_AUTH_DONE) | |
1574 | return 0; | |
1575 | ||
1576 | msleep(300); | |
1577 | } while (--retries); | |
1578 | ||
1579 | return -ETIMEDOUT; | |
1580 | } | |
1581 | ||
1582 | static int icm_reset_phy_port(struct tb *tb, int phy_port) | |
1583 | { | |
1584 | struct icm *icm = tb_priv(tb); | |
1585 | u32 state0, state1; | |
1586 | int port0, port1; | |
1587 | u32 val0, val1; | |
1588 | int ret; | |
1589 | ||
1590 | if (!icm->upstream_port) | |
1591 | return 0; | |
1592 | ||
1593 | if (phy_port) { | |
1594 | port0 = 3; | |
1595 | port1 = 4; | |
1596 | } else { | |
1597 | port0 = 1; | |
1598 | port1 = 2; | |
1599 | } | |
1600 | ||
1601 | /* | |
1602 | * Read link status of both null ports belonging to a single | |
1603 | * physical port. | |
1604 | */ | |
1605 | ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); | |
1606 | if (ret) | |
1607 | return ret; | |
1608 | ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); | |
1609 | if (ret) | |
1610 | return ret; | |
1611 | ||
1612 | state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK; | |
1613 | state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; | |
1614 | state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK; | |
1615 | state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; | |
1616 | ||
1617 | /* If they are both up we need to reset them now */ | |
1618 | if (state0 != TB_PORT_UP || state1 != TB_PORT_UP) | |
1619 | return 0; | |
1620 | ||
1621 | val0 |= PHY_PORT_CS1_LINK_DISABLE; | |
1622 | ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); | |
1623 | if (ret) | |
1624 | return ret; | |
1625 | ||
1626 | val1 |= PHY_PORT_CS1_LINK_DISABLE; | |
1627 | ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); | |
1628 | if (ret) | |
1629 | return ret; | |
1630 | ||
1631 | /* Wait a bit and then re-enable both ports */ | |
1632 | usleep_range(10, 100); | |
1633 | ||
1634 | ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); | |
1635 | if (ret) | |
1636 | return ret; | |
1637 | ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); | |
1638 | if (ret) | |
1639 | return ret; | |
1640 | ||
1641 | val0 &= ~PHY_PORT_CS1_LINK_DISABLE; | |
1642 | ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); | |
1643 | if (ret) | |
1644 | return ret; | |
1645 | ||
1646 | val1 &= ~PHY_PORT_CS1_LINK_DISABLE; | |
1647 | return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); | |
1648 | } | |
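/*
 * Each physical Thunderbolt port consists of two lanes that show up
 * as two adjacent "null" CIO ports (1 + 2 for physical port 0, 3 + 4
 * for physical port 1, per the mapping above). Both lanes are
 * disabled and re-enabled together so that the attached device
 * renegotiates the link from a clean state.
 */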
1649 | ||
1650 | static int icm_firmware_init(struct tb *tb) | |
1651 | { | |
1652 | struct icm *icm = tb_priv(tb); | |
1653 | struct tb_nhi *nhi = tb->nhi; | |
1654 | int ret; | |
1655 | ||
1656 | ret = icm_firmware_start(tb, nhi); | |
1657 | if (ret) { | |
1658 | dev_err(&nhi->pdev->dev, "could not start ICM firmware\n"); | |
1659 | return ret; | |
1660 | } | |
1661 | ||
1662 | if (icm->get_mode) { | |
1663 | ret = icm->get_mode(tb); | |
1664 | ||
1665 | switch (ret) { | |
e6b245cc MW |
1666 | case NHI_FW_SAFE_MODE: |
1667 | icm->safe_mode = true; | |
1668 | break; | |
1669 | ||
f67cf491 MW |
1670 | case NHI_FW_CM_MODE: |
1671 | /* Ask ICM to accept all Thunderbolt devices */ | |
1672 | nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0); | |
1673 | break; | |
1674 | ||
1675 | default: | |
e4be8c9b MW |
1676 | if (ret < 0) |
1677 | return ret; | |
1678 | ||
f67cf491 MW |
1679 | tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret); |
1680 | return -ENODEV; | |
1681 | } | |
1682 | } | |
1683 | ||
1684 | /* | |
1685 | * Reset both physical ports if there is anything connected to | |
1686 | * them already. | |
1687 | */ | |
1688 | ret = icm_reset_phy_port(tb, 0); | |
1689 | if (ret) | |
1690 | dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n"); | |
1691 | ret = icm_reset_phy_port(tb, 1); | |
1692 | if (ret) | |
1693 | dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n"); | |
1694 | ||
1695 | return 0; | |
1696 | } | |
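/*
 * NHI_FW_SAFE_MODE means the firmware could not authenticate its NVM
 * image and runs with reduced functionality; icm_driver_ready()
 * below handles it by exposing only the NVM upgrade path. Any mode
 * other than NHI_FW_CM_MODE is rejected because this connection
 * manager can only drive an ICM running in its normal CM mode.
 */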
1697 | ||
1698 | static int icm_driver_ready(struct tb *tb) | |
1699 | { | |
e6b245cc | 1700 | struct icm *icm = tb_priv(tb); |
f67cf491 MW |
1701 | int ret; |
1702 | ||
1703 | ret = icm_firmware_init(tb); | |
1704 | if (ret) | |
1705 | return ret; | |
1706 | ||
e6b245cc MW |
1707 | if (icm->safe_mode) { |
1708 | tb_info(tb, "Thunderbolt host controller is in safe mode.\n"); | |
1709 | tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n"); | |
1710 | tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n"); | |
1711 | return 0; | |
1712 | } | |
1713 | ||
2d8ff0b5 MW |
1714 | ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl, |
1715 | &icm->rpm); | |
9aaa3b8b MW |
1716 | if (ret) |
1717 | return ret; | |
1718 | ||
1719 | /* | |
1720 | * Make sure the number of supported preboot ACL entries matches | |
1721 | * what we expect or disable the whole feature. | |
1722 | */ | |
1723 | if (tb->nboot_acl > icm->max_boot_acl) | |
1724 | tb->nboot_acl = 0; | |
1725 | ||
1726 | return 0; | |
f67cf491 MW |
1727 | } |
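/*
 * Clamping tb->nboot_acl to zero turns the preboot ACL feature off
 * entirely; presumably the rest of the stack (such as a sysfs
 * boot_acl attribute) treats a zero count as "not supported" and the
 * get/set_boot_acl hooks are then never exercised.
 */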
1728 | ||
1729 | static int icm_suspend(struct tb *tb) | |
1730 | { | |
d04522fa | 1731 | struct icm *icm = tb_priv(tb); |
a684c5b1 | 1732 | |
d04522fa MW |
1733 | if (icm->save_devices) |
1734 | icm->save_devices(tb); | |
a684c5b1 | 1735 | |
d04522fa | 1736 | nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); |
a684c5b1 | 1737 | return 0; |
f67cf491 MW |
1738 | } |
1739 | ||
1740 | /* | |
1741 | * Mark all switches (except root switch) below this one unplugged. ICM | |
1742 | * firmware will send us an updated list of switches after we have sent | |
1743 | * it the driver ready command. If a switch is not in that list it will | |
1744 | * be removed when we perform the rescan. | |
1745 | */ | |
1746 | static void icm_unplug_children(struct tb_switch *sw) | |
1747 | { | |
1748 | unsigned int i; | |
1749 | ||
1750 | if (tb_route(sw)) | |
1751 | sw->is_unplugged = true; | |
1752 | ||
1753 | for (i = 1; i <= sw->config.max_port_number; i++) { | |
1754 | struct tb_port *port = &sw->ports[i]; | |
1755 | ||
1756 | if (tb_is_upstream_port(port)) | |
1757 | continue; | |
d1ff7024 MW |
1758 | if (port->xdomain) { |
1759 | port->xdomain->is_unplugged = true; | |
1760 | continue; | |
1761 | } | |
f67cf491 MW |
1762 | if (!port->remote) |
1763 | continue; | |
1764 | ||
1765 | icm_unplug_children(port->remote->sw); | |
1766 | } | |
1767 | } | |
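/*
 * Note that this only *marks* switches and XDomain connections
 * unplugged; nothing is freed here. The matching cleanup happens in
 * icm_free_unplugged_children() below, once the ICM has had a chance
 * to re-announce the devices that are still present and clear the
 * flag for them.
 */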
1768 | ||
1769 | static void icm_free_unplugged_children(struct tb_switch *sw) | |
1770 | { | |
1771 | unsigned int i; | |
1772 | ||
1773 | for (i = 1; i <= sw->config.max_port_number; i++) { | |
1774 | struct tb_port *port = &sw->ports[i]; | |
1775 | ||
1776 | if (tb_is_upstream_port(port)) | |
1777 | continue; | |
d1ff7024 MW |
1778 | |
1779 | if (port->xdomain && port->xdomain->is_unplugged) { | |
1780 | tb_xdomain_remove(port->xdomain); | |
1781 | port->xdomain = NULL; | |
1782 | continue; | |
1783 | } | |
1784 | ||
f67cf491 MW |
1785 | if (!port->remote) |
1786 | continue; | |
1787 | ||
1788 | if (port->remote->sw->is_unplugged) { | |
1789 | tb_switch_remove(port->remote->sw); | |
1790 | port->remote = NULL; | |
1791 | } else { | |
1792 | icm_free_unplugged_children(port->remote->sw); | |
1793 | } | |
1794 | } | |
1795 | } | |
1796 | ||
1797 | static void icm_rescan_work(struct work_struct *work) | |
1798 | { | |
1799 | struct icm *icm = container_of(work, struct icm, rescan_work.work); | |
1800 | struct tb *tb = icm_to_tb(icm); | |
1801 | ||
1802 | mutex_lock(&tb->lock); | |
1803 | if (tb->root_switch) | |
1804 | icm_free_unplugged_children(tb->root_switch); | |
1805 | mutex_unlock(&tb->lock); | |
1806 | } | |
1807 | ||
1808 | static void icm_complete(struct tb *tb) | |
1809 | { | |
1810 | struct icm *icm = tb_priv(tb); | |
1811 | ||
1812 | if (tb->nhi->going_away) | |
1813 | return; | |
1814 | ||
1815 | icm_unplug_children(tb->root_switch); | |
1816 | ||
1817 | /* | |
1818 | * Now all existing children should be resumed. Start events | |
1819 | * from ICM to get updated status. | |
1820 | */ | |
2d8ff0b5 | 1821 | __icm_driver_ready(tb, NULL, NULL, NULL); |
f67cf491 MW |
1822 | |
1823 | /* | |
1824 | * We do not get notifications of devices that have been | |
1825 | * unplugged during suspend, so schedule a rescan to clean them | |
1826 | * up if there are any. | |
1827 | */ | |
1828 | queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500)); | |
1829 | } | |
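/*
 * The resume sequence is thus: mark the whole tree unplugged, send
 * the driver ready command so the ICM starts sending device
 * connected events again (clearing the flag for surviving devices),
 * and finally run the delayed rescan to remove whatever is still
 * marked unplugged.
 */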
1830 | ||
2d8ff0b5 MW |
1831 | static int icm_runtime_suspend(struct tb *tb) |
1832 | { | |
1833 | nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); | |
1834 | return 0; | |
1835 | } | |
1836 | ||
1837 | static int icm_runtime_resume(struct tb *tb) | |
1838 | { | |
1839 | /* | |
1840 | * We can reuse the same resume functionality as with system | |
1841 | * suspend. | |
1842 | */ | |
1843 | icm_complete(tb); | |
1844 | return 0; | |
1845 | } | |
1846 | ||
f67cf491 MW |
1847 | static int icm_start(struct tb *tb) |
1848 | { | |
e6b245cc | 1849 | struct icm *icm = tb_priv(tb); |
f67cf491 MW |
1850 | int ret; |
1851 | ||
e6b245cc MW |
1852 | if (icm->safe_mode) |
1853 | tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0); | |
1854 | else | |
1855 | tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); | |
f67cf491 MW |
1856 | if (!tb->root_switch) |
1857 | return -ENODEV; | |
1858 | ||
e6b245cc MW |
1859 | /* |
1860 | * NVM upgrade has not been tested on Apple systems and they | |
1861 | * don't provide images publicly either. To be on the safe side, | |
1862 | * prevent root switch NVM upgrade on Macs for now. | |
1863 | */ | |
630b3aff | 1864 | tb->root_switch->no_nvm_upgrade = x86_apple_machine; |
2d8ff0b5 | 1865 | tb->root_switch->rpm = icm->rpm; |
e6b245cc | 1866 | |
f67cf491 | 1867 | ret = tb_switch_add(tb->root_switch); |
d1ff7024 | 1868 | if (ret) { |
f67cf491 | 1869 | tb_switch_put(tb->root_switch); |
d1ff7024 MW |
1870 | tb->root_switch = NULL; |
1871 | } | |
f67cf491 MW |
1872 | |
1873 | return ret; | |
1874 | } | |
1875 | ||
1876 | static void icm_stop(struct tb *tb) | |
1877 | { | |
1878 | struct icm *icm = tb_priv(tb); | |
1879 | ||
1880 | cancel_delayed_work(&icm->rescan_work); | |
1881 | tb_switch_remove(tb->root_switch); | |
1882 | tb->root_switch = NULL; | |
1883 | nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); | |
1884 | } | |
1885 | ||
e6b245cc MW |
1886 | static int icm_disconnect_pcie_paths(struct tb *tb) |
1887 | { | |
1888 | return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0); | |
1889 | } | |
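/*
 * Like the DRV_UNLOADS commands above, this goes through the NHI
 * mailbox rather than the ICM message protocol;
 * NHI_MAILBOX_DISCONNECT_PCIE_PATHS asks the firmware to tear down
 * all tunneled PCIe paths in one operation.
 */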
1890 | ||
9aaa3b8b | 1891 | /* Falcon Ridge */ |
f67cf491 MW |
1892 | static const struct tb_cm_ops icm_fr_ops = { |
1893 | .driver_ready = icm_driver_ready, | |
1894 | .start = icm_start, | |
1895 | .stop = icm_stop, | |
1896 | .suspend = icm_suspend, | |
1897 | .complete = icm_complete, | |
1898 | .handle_event = icm_handle_event, | |
1899 | .approve_switch = icm_fr_approve_switch, | |
1900 | .add_switch_key = icm_fr_add_switch_key, | |
1901 | .challenge_switch_key = icm_fr_challenge_switch_key, | |
e6b245cc | 1902 | .disconnect_pcie_paths = icm_disconnect_pcie_paths, |
d1ff7024 MW |
1903 | .approve_xdomain_paths = icm_fr_approve_xdomain_paths, |
1904 | .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, | |
f67cf491 MW |
1905 | }; |
1906 | ||
9aaa3b8b MW |
1907 | /* Alpine Ridge */ |
1908 | static const struct tb_cm_ops icm_ar_ops = { | |
1909 | .driver_ready = icm_driver_ready, | |
1910 | .start = icm_start, | |
1911 | .stop = icm_stop, | |
1912 | .suspend = icm_suspend, | |
1913 | .complete = icm_complete, | |
2d8ff0b5 MW |
1914 | .runtime_suspend = icm_runtime_suspend, |
1915 | .runtime_resume = icm_runtime_resume, | |
9aaa3b8b MW |
1916 | .handle_event = icm_handle_event, |
1917 | .get_boot_acl = icm_ar_get_boot_acl, | |
1918 | .set_boot_acl = icm_ar_set_boot_acl, | |
1919 | .approve_switch = icm_fr_approve_switch, | |
1920 | .add_switch_key = icm_fr_add_switch_key, | |
1921 | .challenge_switch_key = icm_fr_challenge_switch_key, | |
1922 | .disconnect_pcie_paths = icm_disconnect_pcie_paths, | |
1923 | .approve_xdomain_paths = icm_fr_approve_xdomain_paths, | |
1924 | .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, | |
1925 | }; | |
1926 | ||
4bac471d RM |
1927 | /* Titan Ridge */ |
1928 | static const struct tb_cm_ops icm_tr_ops = { | |
1929 | .driver_ready = icm_driver_ready, | |
1930 | .start = icm_start, | |
1931 | .stop = icm_stop, | |
1932 | .suspend = icm_suspend, | |
1933 | .complete = icm_complete, | |
2d8ff0b5 MW |
1934 | .runtime_suspend = icm_runtime_suspend, |
1935 | .runtime_resume = icm_runtime_resume, | |
4bac471d RM |
1936 | .handle_event = icm_handle_event, |
1937 | .get_boot_acl = icm_ar_get_boot_acl, | |
1938 | .set_boot_acl = icm_ar_set_boot_acl, | |
1939 | .approve_switch = icm_tr_approve_switch, | |
1940 | .add_switch_key = icm_tr_add_switch_key, | |
1941 | .challenge_switch_key = icm_tr_challenge_switch_key, | |
1942 | .disconnect_pcie_paths = icm_disconnect_pcie_paths, | |
1943 | .approve_xdomain_paths = icm_tr_approve_xdomain_paths, | |
1944 | .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, | |
1945 | }; | |
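/*
 * The three cm_ops tables differ mainly by hardware generation:
 * Falcon Ridge has neither runtime PM hooks nor preboot ACL support;
 * Alpine Ridge adds runtime_suspend/resume and the get/set_boot_acl
 * callbacks while reusing the Falcon Ridge approval, key and XDomain
 * handlers; Titan Ridge keeps the Alpine Ridge boot ACL hooks but
 * uses its own icm_tr_* variants for switch approval, keys and
 * XDomain paths.
 */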
1946 | ||
f67cf491 MW |
1947 | struct tb *icm_probe(struct tb_nhi *nhi) |
1948 | { | |
1949 | struct icm *icm; | |
1950 | struct tb *tb; | |
1951 | ||
1952 | tb = tb_domain_alloc(nhi, sizeof(struct icm)); | |
1953 | if (!tb) | |
1954 | return NULL; | |
1955 | ||
1956 | icm = tb_priv(tb); | |
1957 | INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work); | |
1958 | mutex_init(&icm->request_lock); | |
1959 | ||
1960 | switch (nhi->pdev->device) { | |
1961 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: | |
1962 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: | |
1963 | icm->is_supported = icm_fr_is_supported; | |
1964 | icm->get_route = icm_fr_get_route; | |
d04522fa | 1965 | icm->save_devices = icm_fr_save_devices; |
3080e197 | 1966 | icm->driver_ready = icm_fr_driver_ready; |
f67cf491 MW |
1967 | icm->device_connected = icm_fr_device_connected; |
1968 | icm->device_disconnected = icm_fr_device_disconnected; | |
d1ff7024 MW |
1969 | icm->xdomain_connected = icm_fr_xdomain_connected; |
1970 | icm->xdomain_disconnected = icm_fr_xdomain_disconnected; | |
f67cf491 MW |
1971 | tb->cm_ops = &icm_fr_ops; |
1972 | break; | |
1973 | ||
1974 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI: | |
1975 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI: | |
1976 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI: | |
1977 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI: | |
1978 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: | |
9aaa3b8b | 1979 | icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; |
f67cf491 MW |
1980 | icm->is_supported = icm_ar_is_supported; |
1981 | icm->get_mode = icm_ar_get_mode; | |
1982 | icm->get_route = icm_ar_get_route; | |
d04522fa | 1983 | icm->save_devices = icm_fr_save_devices; |
9aaa3b8b | 1984 | icm->driver_ready = icm_ar_driver_ready; |
f67cf491 MW |
1985 | icm->device_connected = icm_fr_device_connected; |
1986 | icm->device_disconnected = icm_fr_device_disconnected; | |
d1ff7024 MW |
1987 | icm->xdomain_connected = icm_fr_xdomain_connected; |
1988 | icm->xdomain_disconnected = icm_fr_xdomain_disconnected; | |
9aaa3b8b | 1989 | tb->cm_ops = &icm_ar_ops; |
f67cf491 | 1990 | break; |
4bac471d RM |
1991 | |
1992 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI: | |
1993 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI: | |
1994 | icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; | |
1995 | icm->is_supported = icm_ar_is_supported; | |
1996 | icm->get_mode = icm_ar_get_mode; | |
1997 | icm->driver_ready = icm_tr_driver_ready; | |
1998 | icm->device_connected = icm_tr_device_connected; | |
1999 | icm->device_disconnected = icm_tr_device_disconnected; | |
2000 | icm->xdomain_connected = icm_tr_xdomain_connected; | |
2001 | icm->xdomain_disconnected = icm_tr_xdomain_disconnected; | |
2002 | tb->cm_ops = &icm_tr_ops; | |
2003 | break; | |
f67cf491 MW |
2004 | } |
2005 | ||
2006 | if (!icm->is_supported || !icm->is_supported(tb)) { | |
2007 | dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n"); | |
2008 | tb_domain_put(tb); | |
2009 | return NULL; | |
2010 | } | |
2011 | ||
2012 | return tb; | |
2013 | } |
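/*
 * A hypothetical caller (the NHI driver, not shown in this file)
 * would try the firmware connection manager first and fall back to
 * the software one when icm_probe() returns NULL:
 *
 *	tb = icm_probe(nhi);
 *	if (!tb)
 *		tb = tb_probe(nhi);
 *	if (!tb)
 *		return -ENODEV;
 */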