Commit | Line | Data |
---|---|---|
fd3b339c | 1 | // SPDX-License-Identifier: GPL-2.0 |
f67cf491 MW |
2 | /* |
3 | * Internal Thunderbolt Connection Manager. This is firmware running on | |
4 | * the Thunderbolt host controller that performs most of the low-level | |
5 | * handling. | |
6 | * | |
7 | * Copyright (C) 2017, Intel Corporation | |
8 | * Authors: Michael Jamet <michael.jamet@intel.com> | |
9 | * Mika Westerberg <mika.westerberg@linux.intel.com> | |
f67cf491 MW |
10 | */ |
11 | ||
12 | #include <linux/delay.h> | |
f67cf491 MW |
13 | #include <linux/mutex.h> |
14 | #include <linux/pci.h> | |
2d8ff0b5 | 15 | #include <linux/pm_runtime.h> |
630b3aff | 16 | #include <linux/platform_data/x86/apple.h> |
f67cf491 MW |
17 | #include <linux/sizes.h> |
18 | #include <linux/slab.h> | |
19 | #include <linux/workqueue.h> | |
20 | ||
21 | #include "ctl.h" | |
22 | #include "nhi_regs.h" | |
23 | #include "tb.h" | |
24 | ||
25 | #define PCIE2CIO_CMD 0x30 | |
26 | #define PCIE2CIO_CMD_TIMEOUT BIT(31) | |
27 | #define PCIE2CIO_CMD_START BIT(30) | |
28 | #define PCIE2CIO_CMD_WRITE BIT(21) | |
29 | #define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19) | |
30 | #define PCIE2CIO_CMD_CS_SHIFT 19 | |
31 | #define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13) | |
32 | #define PCIE2CIO_CMD_PORT_SHIFT 13 | |
33 | ||
34 | #define PCIE2CIO_WRDATA 0x34 | |
35 | #define PCIE2CIO_RDDATA 0x38 | |
36 | ||
37 | #define PHY_PORT_CS1 0x37 | |
38 | #define PHY_PORT_CS1_LINK_DISABLE BIT(14) | |
39 | #define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26) | |
40 | #define PHY_PORT_CS1_LINK_STATE_SHIFT 26 | |
41 | ||
0b0a0bd0 MW |
42 | #define ICM_TIMEOUT 5000 /* ms */ |
43 | #define ICM_APPROVE_TIMEOUT 10000 /* ms */ | |
f67cf491 | 44 | #define ICM_MAX_LINK 4 |
f67cf491 MW |
45 | |
46 | /** | |
47 | * struct icm - Internal connection manager private data | |
48 | * @request_lock: Makes sure only one message is sent to ICM at a time |
49 | * @rescan_work: Work used to rescan the surviving switches after resume | |
50 | * @upstream_port: Pointer to the PCIe upstream port this host | |
51 | * controller is connected to. This is only set for systems |
52 | * where ICM needs to be started manually | |
53 | * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides | |
54 | * (only set when @upstream_port is not %NULL) | |
e6b245cc | 55 | * @safe_mode: ICM is in safe mode |
9aaa3b8b | 56 | * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) |
2d8ff0b5 | 57 | * @rpm: Does the controller support runtime PM (RTD3) |
f437c24b | 58 | * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller |
3cdb9446 | 59 | * @veto: Is RTD3 veto in effect |
f67cf491 | 60 | * @is_supported: Checks if we can support ICM on this controller |
0d53827d | 61 | * @cio_reset: Trigger CIO reset |
f67cf491 MW |
62 | * @get_mode: Read and return the ICM firmware mode (optional) |
63 | * @get_route: Find a route string for the given switch | |
d04522fa | 64 | * @save_devices: Ask ICM to save devices to ACL when suspending (optional) |
3080e197 | 65 | * @driver_ready: Send driver ready message to ICM |
3cdb9446 | 66 | * @set_uuid: Set UUID for the root switch (optional) |
f67cf491 MW |
67 | * @device_connected: Handle device connected ICM message |
68 | * @device_disconnected: Handle device disconnected ICM message | |
d1ff7024 MW |
69 | * @xdomain_connected: Handle XDomain connected ICM message |
70 | * @xdomain_disconnected: Handle XDomain disconnected ICM message | |
3cdb9446 | 71 | * @rtd3_veto: Handle RTD3 veto notification ICM message |
f67cf491 MW |
72 | */ |
73 | struct icm { | |
74 | struct mutex request_lock; | |
75 | struct delayed_work rescan_work; | |
76 | struct pci_dev *upstream_port; | |
9aaa3b8b | 77 | size_t max_boot_acl; |
f67cf491 | 78 | int vnd_cap; |
e6b245cc | 79 | bool safe_mode; |
2d8ff0b5 | 80 | bool rpm; |
f437c24b | 81 | bool can_upgrade_nvm; |
3cdb9446 | 82 | bool veto; |
f67cf491 | 83 | bool (*is_supported)(struct tb *tb); |
0d53827d | 84 | int (*cio_reset)(struct tb *tb); |
f67cf491 MW |
85 | int (*get_mode)(struct tb *tb); |
86 | int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route); | |
d04522fa | 87 | void (*save_devices)(struct tb *tb); |
3080e197 | 88 | int (*driver_ready)(struct tb *tb, |
9aaa3b8b | 89 | enum tb_security_level *security_level, |
2d8ff0b5 | 90 | size_t *nboot_acl, bool *rpm); |
3cdb9446 | 91 | void (*set_uuid)(struct tb *tb); |
f67cf491 MW |
92 | void (*device_connected)(struct tb *tb, |
93 | const struct icm_pkg_header *hdr); | |
94 | void (*device_disconnected)(struct tb *tb, | |
95 | const struct icm_pkg_header *hdr); | |
d1ff7024 MW |
96 | void (*xdomain_connected)(struct tb *tb, |
97 | const struct icm_pkg_header *hdr); | |
98 | void (*xdomain_disconnected)(struct tb *tb, | |
99 | const struct icm_pkg_header *hdr); | |
3cdb9446 | 100 | void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr); |
f67cf491 MW |
101 | }; |
102 | ||
103 | struct icm_notification { | |
104 | struct work_struct work; | |
105 | struct icm_pkg_header *pkg; | |
106 | struct tb *tb; | |
107 | }; | |
108 | ||
2d8ff0b5 MW |
109 | struct ep_name_entry { |
110 | u8 len; | |
111 | u8 type; | |
112 | u8 data[0]; | |
113 | }; | |
114 | ||
115 | #define EP_NAME_INTEL_VSS 0x10 | |
116 | ||
117 | /* Intel Vendor specific structure */ | |
118 | struct intel_vss { | |
119 | u16 vendor; | |
120 | u16 model; | |
121 | u8 mc; | |
122 | u8 flags; | |
123 | u16 pci_devid; | |
124 | u32 nvm_version; | |
125 | }; | |
126 | ||
127 | #define INTEL_VSS_FLAGS_RTD3 BIT(0) | |
128 | ||
129 | static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size) | |
130 | { | |
131 | const void *end = ep_name + size; | |
132 | ||
133 | while (ep_name < end) { | |
134 | const struct ep_name_entry *ep = ep_name; | |
135 | ||
136 | if (!ep->len) | |
137 | break; | |
138 | if (ep_name + ep->len > end) | |
139 | break; | |
140 | ||
141 | if (ep->type == EP_NAME_INTEL_VSS) | |
142 | return (const struct intel_vss *)ep->data; | |
143 | ||
144 | ep_name += ep->len; | |
145 | } | |
146 | ||
147 | return NULL; | |
148 | } | |
149 | ||
f67cf491 MW |
150 | static inline struct tb *icm_to_tb(struct icm *icm) |
151 | { | |
152 | return ((void *)icm - sizeof(struct tb)); | |
153 | } | |
154 | ||
155 | static inline u8 phy_port_from_route(u64 route, u8 depth) | |
156 | { | |
d1ff7024 MW |
157 | u8 link; |
158 | ||
159 | link = depth ? route >> ((depth - 1) * 8) : route; | |
160 | return tb_phy_port_from_link(link); | |
f67cf491 MW |
161 | } |
162 | ||
163 | static inline u8 dual_link_from_link(u8 link) | |
164 | { | |
165 | return link ? ((link - 1) ^ 0x01) + 1 : 0; | |
166 | } | |
167 | ||
168 | static inline u64 get_route(u32 route_hi, u32 route_lo) | |
169 | { | |
170 | return (u64)route_hi << 32 | route_lo; | |
171 | } | |
172 | ||
4bac471d RM |
173 | static inline u64 get_parent_route(u64 route) |
174 | { | |
175 | int depth = tb_route_length(route); | |
176 | return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; | |
177 | } | |
178 | ||
0d53827d MW |
179 | static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) |
180 | { | |
181 | unsigned long end = jiffies + msecs_to_jiffies(timeout_msec); | |
182 | u32 cmd; | |
183 | ||
184 | do { | |
185 | pci_read_config_dword(icm->upstream_port, | |
186 | icm->vnd_cap + PCIE2CIO_CMD, &cmd); | |
187 | if (!(cmd & PCIE2CIO_CMD_START)) { | |
188 | if (cmd & PCIE2CIO_CMD_TIMEOUT) | |
189 | break; | |
190 | return 0; | |
191 | } | |
192 | ||
193 | msleep(50); | |
194 | } while (time_before(jiffies, end)); | |
195 | ||
196 | return -ETIMEDOUT; | |
197 | } | |
198 | ||
199 | static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, | |
200 | unsigned int port, unsigned int index, u32 *data) | |
201 | { | |
202 | struct pci_dev *pdev = icm->upstream_port; | |
203 | int ret, vnd_cap = icm->vnd_cap; | |
204 | u32 cmd; | |
205 | ||
206 | cmd = index; | |
207 | cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; | |
208 | cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; | |
209 | cmd |= PCIE2CIO_CMD_START; | |
210 | pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); | |
211 | ||
212 | ret = pci2cio_wait_completion(icm, 5000); | |
213 | if (ret) | |
214 | return ret; | |
215 | ||
216 | pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data); | |
217 | return 0; | |
218 | } | |
219 | ||
220 | static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, | |
221 | unsigned int port, unsigned int index, u32 data) | |
222 | { | |
223 | struct pci_dev *pdev = icm->upstream_port; | |
224 | int vnd_cap = icm->vnd_cap; | |
225 | u32 cmd; | |
226 | ||
227 | pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data); | |
228 | ||
229 | cmd = index; | |
230 | cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; | |
231 | cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; | |
232 | cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START; | |
233 | pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); | |
234 | ||
235 | return pci2cio_wait_completion(icm, 5000); | |
236 | } | |
237 | ||
f67cf491 MW |
238 | static bool icm_match(const struct tb_cfg_request *req, |
239 | const struct ctl_pkg *pkg) | |
240 | { | |
241 | const struct icm_pkg_header *res_hdr = pkg->buffer; | |
242 | const struct icm_pkg_header *req_hdr = req->request; | |
243 | ||
244 | if (pkg->frame.eof != req->response_type) | |
245 | return false; | |
246 | if (res_hdr->code != req_hdr->code) | |
247 | return false; | |
248 | ||
249 | return true; | |
250 | } | |
251 | ||
252 | static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) | |
253 | { | |
254 | const struct icm_pkg_header *hdr = pkg->buffer; | |
255 | ||
256 | if (hdr->packet_id < req->npackets) { | |
257 | size_t offset = hdr->packet_id * req->response_size; | |
258 | ||
259 | memcpy(req->response + offset, pkg->buffer, req->response_size); | |
260 | } | |
261 | ||
262 | return hdr->packet_id == hdr->total_packets - 1; | |
263 | } | |
264 | ||
265 | static int icm_request(struct tb *tb, const void *request, size_t request_size, | |
266 | void *response, size_t response_size, size_t npackets, | |
267 | unsigned int timeout_msec) | |
268 | { | |
269 | struct icm *icm = tb_priv(tb); | |
270 | int retries = 3; | |
271 | ||
272 | do { | |
273 | struct tb_cfg_request *req; | |
274 | struct tb_cfg_result res; | |
275 | ||
276 | req = tb_cfg_request_alloc(); | |
277 | if (!req) | |
278 | return -ENOMEM; | |
279 | ||
280 | req->match = icm_match; | |
281 | req->copy = icm_copy; | |
282 | req->request = request; | |
283 | req->request_size = request_size; | |
284 | req->request_type = TB_CFG_PKG_ICM_CMD; | |
285 | req->response = response; | |
286 | req->npackets = npackets; | |
287 | req->response_size = response_size; | |
288 | req->response_type = TB_CFG_PKG_ICM_RESP; | |
289 | ||
290 | mutex_lock(&icm->request_lock); | |
291 | res = tb_cfg_request_sync(tb->ctl, req, timeout_msec); | |
292 | mutex_unlock(&icm->request_lock); | |
293 | ||
294 | tb_cfg_request_put(req); | |
295 | ||
296 | if (res.err != -ETIMEDOUT) | |
297 | return res.err == 1 ? -EIO : res.err; | |
298 | ||
299 | usleep_range(20, 50); | |
300 | } while (retries--); | |
301 | ||
302 | return -ETIMEDOUT; | |
303 | } | |
304 | ||
3cdb9446 MW |
305 | /* |
306 | * If rescan is queued to run (we are resuming), postpone it to give the | |
307 | * firmware some more time to send device connected notifications for the |
308 | * next devices in the chain. |
309 | */ | |
310 | static void icm_postpone_rescan(struct tb *tb) | |
311 | { | |
312 | struct icm *icm = tb_priv(tb); | |
313 | ||
314 | if (delayed_work_pending(&icm->rescan_work)) | |
315 | mod_delayed_work(tb->wq, &icm->rescan_work, | |
316 | msecs_to_jiffies(500)); | |
317 | } | |
318 | ||
319 | static void icm_veto_begin(struct tb *tb) | |
320 | { | |
321 | struct icm *icm = tb_priv(tb); | |
322 | ||
323 | if (!icm->veto) { | |
324 | icm->veto = true; | |
325 | /* Keep the domain powered while veto is in effect */ | |
326 | pm_runtime_get(&tb->dev); | |
327 | } | |
328 | } | |
329 | ||
330 | static void icm_veto_end(struct tb *tb) | |
331 | { | |
332 | struct icm *icm = tb_priv(tb); | |
333 | ||
334 | if (icm->veto) { | |
335 | icm->veto = false; | |
336 | /* Allow the domain to suspend now */ |
337 | pm_runtime_mark_last_busy(&tb->dev); | |
338 | pm_runtime_put_autosuspend(&tb->dev); | |
339 | } | |
340 | } | |
341 | ||
f67cf491 MW |
342 | static bool icm_fr_is_supported(struct tb *tb) |
343 | { | |
630b3aff | 344 | return !x86_apple_machine; |
f67cf491 MW |
345 | } |
346 | ||
347 | static inline int icm_fr_get_switch_index(u32 port) | |
348 | { | |
349 | int index; | |
350 | ||
351 | if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT) | |
352 | return 0; | |
353 | ||
354 | index = port >> ICM_PORT_INDEX_SHIFT; | |
355 | return index != 0xff ? index : 0; | |
356 | } | |
357 | ||
358 | static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) | |
359 | { | |
360 | struct icm_fr_pkg_get_topology_response *switches, *sw; | |
361 | struct icm_fr_pkg_get_topology request = { | |
362 | .hdr = { .code = ICM_GET_TOPOLOGY }, | |
363 | }; | |
364 | size_t npackets = ICM_GET_TOPOLOGY_PACKETS; | |
365 | int ret, index; | |
366 | u8 i; | |
367 | ||
368 | switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL); | |
369 | if (!switches) | |
370 | return -ENOMEM; | |
371 | ||
372 | ret = icm_request(tb, &request, sizeof(request), switches, | |
373 | sizeof(*switches), npackets, ICM_TIMEOUT); | |
374 | if (ret) | |
375 | goto err_free; | |
376 | ||
377 | sw = &switches[0]; | |
378 | index = icm_fr_get_switch_index(sw->ports[link]); | |
379 | if (!index) { | |
380 | ret = -ENODEV; | |
381 | goto err_free; | |
382 | } | |
383 | ||
384 | sw = &switches[index]; | |
385 | for (i = 1; i < depth; i++) { | |
386 | unsigned int j; | |
387 | ||
388 | if (!(sw->first_data & ICM_SWITCH_USED)) { | |
389 | ret = -ENODEV; | |
390 | goto err_free; | |
391 | } | |
392 | ||
393 | for (j = 0; j < ARRAY_SIZE(sw->ports); j++) { | |
394 | index = icm_fr_get_switch_index(sw->ports[j]); | |
395 | if (index > sw->switch_index) { | |
396 | sw = &switches[index]; | |
397 | break; | |
398 | } | |
399 | } | |
400 | } | |
401 | ||
402 | *route = get_route(sw->route_hi, sw->route_lo); | |
403 | ||
404 | err_free: | |
405 | kfree(switches); | |
406 | return ret; | |
407 | } | |
408 | ||
d04522fa MW |
409 | static void icm_fr_save_devices(struct tb *tb) |
410 | { | |
411 | nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); | |
412 | } | |
413 | ||
3080e197 | 414 | static int |
9aaa3b8b | 415 | icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level, |
2d8ff0b5 | 416 | size_t *nboot_acl, bool *rpm) |
3080e197 MW |
417 | { |
418 | struct icm_fr_pkg_driver_ready_response reply; | |
419 | struct icm_pkg_driver_ready request = { | |
420 | .hdr.code = ICM_DRIVER_READY, | |
421 | }; | |
422 | int ret; | |
423 | ||
424 | memset(&reply, 0, sizeof(reply)); | |
425 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
426 | 1, ICM_TIMEOUT); | |
427 | if (ret) | |
428 | return ret; | |
429 | ||
430 | if (security_level) | |
431 | *security_level = reply.security_level & ICM_FR_SLEVEL_MASK; | |
432 | ||
433 | return 0; | |
434 | } | |
435 | ||
f67cf491 MW |
436 | static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) |
437 | { | |
438 | struct icm_fr_pkg_approve_device request; | |
439 | struct icm_fr_pkg_approve_device reply; | |
440 | int ret; | |
441 | ||
442 | memset(&request, 0, sizeof(request)); | |
443 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
444 | request.hdr.code = ICM_APPROVE_DEVICE; | |
445 | request.connection_id = sw->connection_id; | |
446 | request.connection_key = sw->connection_key; | |
447 | ||
448 | memset(&reply, 0, sizeof(reply)); | |
449 | /* Use larger timeout as establishing tunnels can take some time */ | |
450 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
0b0a0bd0 | 451 | 1, ICM_APPROVE_TIMEOUT); |
f67cf491 MW |
452 | if (ret) |
453 | return ret; | |
454 | ||
455 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { | |
456 | tb_warn(tb, "PCIe tunnel creation failed\n"); | |
457 | return -EIO; | |
458 | } | |
459 | ||
460 | return 0; | |
461 | } | |
462 | ||
463 | static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) | |
464 | { | |
465 | struct icm_fr_pkg_add_device_key request; | |
466 | struct icm_fr_pkg_add_device_key_response reply; | |
467 | int ret; | |
468 | ||
469 | memset(&request, 0, sizeof(request)); | |
470 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
471 | request.hdr.code = ICM_ADD_DEVICE_KEY; | |
472 | request.connection_id = sw->connection_id; | |
473 | request.connection_key = sw->connection_key; | |
474 | memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); | |
475 | ||
476 | memset(&reply, 0, sizeof(reply)); | |
477 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
478 | 1, ICM_TIMEOUT); | |
479 | if (ret) | |
480 | return ret; | |
481 | ||
482 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { | |
483 | tb_warn(tb, "Adding key to switch failed\n"); | |
484 | return -EIO; | |
485 | } | |
486 | ||
487 | return 0; | |
488 | } | |
489 | ||
490 | static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, | |
491 | const u8 *challenge, u8 *response) | |
492 | { | |
493 | struct icm_fr_pkg_challenge_device request; | |
494 | struct icm_fr_pkg_challenge_device_response reply; | |
495 | int ret; | |
496 | ||
497 | memset(&request, 0, sizeof(request)); | |
498 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
499 | request.hdr.code = ICM_CHALLENGE_DEVICE; | |
500 | request.connection_id = sw->connection_id; | |
501 | request.connection_key = sw->connection_key; | |
502 | memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); | |
503 | ||
504 | memset(&reply, 0, sizeof(reply)); | |
505 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
506 | 1, ICM_TIMEOUT); | |
507 | if (ret) | |
508 | return ret; | |
509 | ||
510 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
511 | return -EKEYREJECTED; | |
512 | if (reply.hdr.flags & ICM_FLAGS_NO_KEY) | |
513 | return -ENOKEY; | |
514 | ||
515 | memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); | |
516 | ||
517 | return 0; | |
518 | } | |
519 | ||
d1ff7024 MW |
520 | static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) |
521 | { | |
522 | struct icm_fr_pkg_approve_xdomain_response reply; | |
523 | struct icm_fr_pkg_approve_xdomain request; | |
524 | int ret; | |
525 | ||
526 | memset(&request, 0, sizeof(request)); | |
527 | request.hdr.code = ICM_APPROVE_XDOMAIN; | |
528 | request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link; | |
529 | memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); | |
530 | ||
531 | request.transmit_path = xd->transmit_path; | |
532 | request.transmit_ring = xd->transmit_ring; | |
533 | request.receive_path = xd->receive_path; | |
534 | request.receive_ring = xd->receive_ring; | |
535 | ||
536 | memset(&reply, 0, sizeof(reply)); | |
537 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
538 | 1, ICM_TIMEOUT); | |
539 | if (ret) | |
540 | return ret; | |
541 | ||
542 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
543 | return -EIO; | |
544 | ||
545 | return 0; | |
546 | } | |
547 | ||
548 | static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) | |
549 | { | |
550 | u8 phy_port; | |
551 | u8 cmd; | |
552 | ||
553 | phy_port = tb_phy_port_from_link(xd->link); | |
554 | if (phy_port == 0) | |
555 | cmd = NHI_MAILBOX_DISCONNECT_PA; | |
556 | else | |
557 | cmd = NHI_MAILBOX_DISCONNECT_PB; | |
558 | ||
559 | nhi_mailbox_cmd(tb->nhi, cmd, 1); | |
560 | usleep_range(10, 50); | |
561 | nhi_mailbox_cmd(tb->nhi, cmd, 2); | |
562 | return 0; | |
563 | } | |
564 | ||
3cdb9446 MW |
565 | static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route, |
566 | const uuid_t *uuid, const u8 *ep_name, | |
567 | size_t ep_name_size, u8 connection_id, | |
568 | u8 connection_key, u8 link, u8 depth, | |
569 | enum tb_security_level security_level, | |
570 | bool authorized, bool boot) | |
ee487dd2 | 571 | { |
2d8ff0b5 | 572 | const struct intel_vss *vss; |
ee487dd2 | 573 | struct tb_switch *sw; |
3cdb9446 | 574 | int ret; |
ee487dd2 | 575 | |
2d8ff0b5 MW |
576 | pm_runtime_get_sync(&parent_sw->dev); |
577 | ||
ee487dd2 | 578 | sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route); |
444ac384 | 579 | if (IS_ERR(sw)) |
2d8ff0b5 | 580 | goto out; |
ee487dd2 MW |
581 | |
582 | sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); | |
fd21b79e AP |
583 | if (!sw->uuid) { |
584 | tb_sw_warn(sw, "cannot allocate memory for switch\n"); | |
585 | tb_switch_put(sw); | |
586 | goto out; | |
587 | } | |
ee487dd2 MW |
588 | sw->connection_id = connection_id; |
589 | sw->connection_key = connection_key; | |
590 | sw->link = link; | |
591 | sw->depth = depth; | |
592 | sw->authorized = authorized; | |
593 | sw->security_level = security_level; | |
14862ee3 | 594 | sw->boot = boot; |
4f7c2e0d | 595 | init_completion(&sw->rpm_complete); |
ee487dd2 | 596 | |
2d8ff0b5 MW |
597 | vss = parse_intel_vss(ep_name, ep_name_size); |
598 | if (vss) | |
599 | sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3); | |
600 | ||
ee487dd2 MW |
601 | /* Link the two switches now */ |
602 | tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); | |
603 | tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); | |
604 | ||
3cdb9446 MW |
605 | ret = tb_switch_add(sw); |
606 | if (ret) { | |
ee487dd2 MW |
607 | tb_port_at(tb_route(sw), parent_sw)->remote = NULL; |
608 | tb_switch_put(sw); | |
3cdb9446 | 609 | sw = ERR_PTR(ret); |
ee487dd2 | 610 | } |
2d8ff0b5 MW |
611 | |
612 | out: | |
613 | pm_runtime_mark_last_busy(&parent_sw->dev); | |
614 | pm_runtime_put_autosuspend(&parent_sw->dev); | |
3cdb9446 MW |
615 | |
616 | return sw; | |
ee487dd2 MW |
617 | } |
618 | ||
619 | static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, | |
620 | u64 route, u8 connection_id, u8 connection_key, | |
14862ee3 | 621 | u8 link, u8 depth, bool boot) |
ee487dd2 MW |
622 | { |
623 | /* Disconnect from parent */ | |
624 | tb_port_at(tb_route(sw), parent_sw)->remote = NULL; | |
625 | /* Re-connect via updated port */ |
626 | tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); | |
627 | ||
628 | /* Update with the new addressing information */ | |
629 | sw->config.route_hi = upper_32_bits(route); | |
630 | sw->config.route_lo = lower_32_bits(route); | |
631 | sw->connection_id = connection_id; | |
632 | sw->connection_key = connection_key; | |
633 | sw->link = link; | |
634 | sw->depth = depth; | |
14862ee3 | 635 | sw->boot = boot; |
ee487dd2 MW |
636 | |
637 | /* This switch still exists */ | |
638 | sw->is_unplugged = false; | |
4f7c2e0d MW |
639 | |
640 | /* Runtime resume is now complete */ | |
641 | complete(&sw->rpm_complete); | |
ee487dd2 MW |
642 | } |
643 | ||
f67cf491 MW |
644 | static void remove_switch(struct tb_switch *sw) |
645 | { | |
646 | struct tb_switch *parent_sw; | |
647 | ||
648 | parent_sw = tb_to_switch(sw->dev.parent); | |
649 | tb_port_at(tb_route(sw), parent_sw)->remote = NULL; | |
650 | tb_switch_remove(sw); | |
651 | } | |
652 | ||
ee487dd2 MW |
653 | static void add_xdomain(struct tb_switch *sw, u64 route, |
654 | const uuid_t *local_uuid, const uuid_t *remote_uuid, | |
655 | u8 link, u8 depth) | |
656 | { | |
657 | struct tb_xdomain *xd; | |
658 | ||
2d8ff0b5 MW |
659 | pm_runtime_get_sync(&sw->dev); |
660 | ||
ee487dd2 MW |
661 | xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid); |
662 | if (!xd) | |
2d8ff0b5 | 663 | goto out; |
ee487dd2 MW |
664 | |
665 | xd->link = link; | |
666 | xd->depth = depth; | |
667 | ||
668 | tb_port_at(route, sw)->xdomain = xd; | |
669 | ||
670 | tb_xdomain_add(xd); | |
2d8ff0b5 MW |
671 | |
672 | out: | |
673 | pm_runtime_mark_last_busy(&sw->dev); | |
674 | pm_runtime_put_autosuspend(&sw->dev); | |
ee487dd2 MW |
675 | } |
676 | ||
677 | static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link) | |
678 | { | |
679 | xd->link = link; | |
680 | xd->route = route; | |
681 | xd->is_unplugged = false; | |
682 | } | |
683 | ||
79fae987 MW |
684 | static void remove_xdomain(struct tb_xdomain *xd) |
685 | { | |
686 | struct tb_switch *sw; | |
687 | ||
688 | sw = tb_to_switch(xd->dev.parent); | |
689 | tb_port_at(xd->route, sw)->xdomain = NULL; | |
690 | tb_xdomain_remove(xd); | |
691 | } | |
692 | ||
f67cf491 MW |
693 | static void |
694 | icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |
695 | { | |
696 | const struct icm_fr_event_device_connected *pkg = | |
697 | (const struct icm_fr_event_device_connected *)hdr; | |
ee487dd2 | 698 | enum tb_security_level security_level; |
f67cf491 MW |
699 | struct tb_switch *sw, *parent_sw; |
700 | struct icm *icm = tb_priv(tb); | |
701 | bool authorized = false; | |
79fae987 | 702 | struct tb_xdomain *xd; |
f67cf491 | 703 | u8 link, depth; |
14862ee3 | 704 | bool boot; |
f67cf491 MW |
705 | u64 route; |
706 | int ret; | |
707 | ||
3cdb9446 MW |
708 | icm_postpone_rescan(tb); |
709 | ||
f67cf491 MW |
710 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; |
711 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> | |
712 | ICM_LINK_INFO_DEPTH_SHIFT; | |
713 | authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; | |
ee487dd2 MW |
714 | security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> |
715 | ICM_FLAGS_SLEVEL_SHIFT; | |
14862ee3 | 716 | boot = pkg->link_info & ICM_LINK_INFO_BOOT; |
f67cf491 | 717 | |
cb653eec MW |
718 | if (pkg->link_info & ICM_LINK_INFO_REJECTED) { |
719 | tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n", | |
720 | link, depth); | |
721 | return; | |
722 | } | |
723 | ||
f67cf491 MW |
724 | sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); |
725 | if (sw) { | |
726 | u8 phy_port, sw_phy_port; | |
727 | ||
728 | parent_sw = tb_to_switch(sw->dev.parent); | |
fdd92e89 MW |
729 | sw_phy_port = tb_phy_port_from_link(sw->link); |
730 | phy_port = tb_phy_port_from_link(link); | |
f67cf491 MW |
731 | |
732 | /* | |
733 | * On resume ICM will send us connected events for the | |
734 | * devices that are still present. However, that |
735 | * information might have changed, for example because a |
736 | * switch on a dual-link connection might now have been |
737 | * enumerated using the other link. Make sure our |
738 | * bookkeeping matches that. |
739 | */ | |
740 | if (sw->depth == depth && sw_phy_port == phy_port && | |
741 | !!sw->authorized == authorized) { | |
fdd92e89 MW |
742 | /* |
743 | * It was enumerated through another link so update | |
744 | * route string accordingly. | |
745 | */ | |
746 | if (sw->link != link) { | |
747 | ret = icm->get_route(tb, link, depth, &route); | |
748 | if (ret) { | |
749 | tb_err(tb, "failed to update route string for switch at %u.%u\n", | |
750 | link, depth); | |
751 | tb_switch_put(sw); | |
752 | return; | |
753 | } | |
754 | } else { | |
755 | route = tb_route(sw); | |
756 | } | |
757 | ||
ee487dd2 | 758 | update_switch(parent_sw, sw, route, pkg->connection_id, |
14862ee3 | 759 | pkg->connection_key, link, depth, boot); |
f67cf491 MW |
760 | tb_switch_put(sw); |
761 | return; | |
762 | } | |
763 | ||
764 | /* | |
765 | * User connected the same switch to another physical | |
766 | * port or to another part of the topology. Remove the | |
767 | * existing switch now before adding the new one. | |
768 | */ | |
769 | remove_switch(sw); | |
770 | tb_switch_put(sw); | |
771 | } | |
772 | ||
773 | /* | |
774 | * If the switch was not found by UUID, look for a switch on the |
775 | * same physical port (taking possible link aggregation into |
776 | * account) and depth. If we find one it is definitely a stale |
777 | * one, so remove it first. |
778 | */ | |
779 | sw = tb_switch_find_by_link_depth(tb, link, depth); | |
780 | if (!sw) { | |
781 | u8 dual_link; | |
782 | ||
783 | dual_link = dual_link_from_link(link); | |
784 | if (dual_link) | |
785 | sw = tb_switch_find_by_link_depth(tb, dual_link, depth); | |
786 | } | |
787 | if (sw) { | |
788 | remove_switch(sw); | |
789 | tb_switch_put(sw); | |
790 | } | |
791 | ||
79fae987 MW |
792 | /* Remove existing XDomain connection if found */ |
793 | xd = tb_xdomain_find_by_link_depth(tb, link, depth); | |
794 | if (xd) { | |
795 | remove_xdomain(xd); | |
796 | tb_xdomain_put(xd); | |
797 | } | |
798 | ||
f67cf491 MW |
799 | parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1); |
800 | if (!parent_sw) { | |
801 | tb_err(tb, "failed to find parent switch for %u.%u\n", | |
802 | link, depth); | |
803 | return; | |
804 | } | |
805 | ||
fdd92e89 MW |
806 | ret = icm->get_route(tb, link, depth, &route); |
807 | if (ret) { | |
808 | tb_err(tb, "failed to find route string for switch at %u.%u\n", | |
809 | link, depth); | |
810 | tb_switch_put(parent_sw); | |
811 | return; | |
812 | } | |
813 | ||
2d8ff0b5 MW |
814 | add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, |
815 | sizeof(pkg->ep_name), pkg->connection_id, | |
ee487dd2 | 816 | pkg->connection_key, link, depth, security_level, |
14862ee3 | 817 | authorized, boot); |
f67cf491 | 818 | |
f67cf491 MW |
819 | tb_switch_put(parent_sw); |
820 | } | |
821 | ||
822 | static void | |
823 | icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) | |
824 | { | |
825 | const struct icm_fr_event_device_disconnected *pkg = | |
826 | (const struct icm_fr_event_device_disconnected *)hdr; | |
827 | struct tb_switch *sw; | |
828 | u8 link, depth; | |
829 | ||
830 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; | |
831 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> | |
832 | ICM_LINK_INFO_DEPTH_SHIFT; | |
833 | ||
f0342e75 | 834 | if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { |
f67cf491 MW |
835 | tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); |
836 | return; | |
837 | } | |
838 | ||
839 | sw = tb_switch_find_by_link_depth(tb, link, depth); | |
840 | if (!sw) { | |
841 | tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, | |
842 | depth); | |
843 | return; | |
844 | } | |
845 | ||
846 | remove_switch(sw); | |
847 | tb_switch_put(sw); | |
848 | } | |
849 | ||
d1ff7024 MW |
850 | static void |
851 | icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |
852 | { | |
853 | const struct icm_fr_event_xdomain_connected *pkg = | |
854 | (const struct icm_fr_event_xdomain_connected *)hdr; | |
855 | struct tb_xdomain *xd; | |
856 | struct tb_switch *sw; | |
857 | u8 link, depth; | |
d1ff7024 MW |
858 | u64 route; |
859 | ||
d1ff7024 MW |
860 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; |
861 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> | |
862 | ICM_LINK_INFO_DEPTH_SHIFT; | |
d1ff7024 | 863 | |
f0342e75 | 864 | if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { |
d1ff7024 MW |
865 | tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); |
866 | return; | |
867 | } | |
868 | ||
869 | route = get_route(pkg->local_route_hi, pkg->local_route_lo); | |
870 | ||
871 | xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); | |
872 | if (xd) { | |
873 | u8 xd_phy_port, phy_port; | |
874 | ||
875 | xd_phy_port = phy_port_from_route(xd->route, xd->depth); | |
876 | phy_port = phy_port_from_route(route, depth); | |
877 | ||
878 | if (xd->depth == depth && xd_phy_port == phy_port) { | |
ee487dd2 | 879 | update_xdomain(xd, route, link); |
d1ff7024 MW |
880 | tb_xdomain_put(xd); |
881 | return; | |
882 | } | |
883 | ||
884 | /* | |
885 | * If we find an existing XDomain connection remove it | |
886 | * now. We need to go through the login handshake and |
887 | * everything anyway to be able to re-establish the | |
888 | * connection. | |
889 | */ | |
890 | remove_xdomain(xd); | |
891 | tb_xdomain_put(xd); | |
892 | } | |
893 | ||
894 | /* | |
895 | * Check if there already exists an XDomain in the same place |
896 | * as the new one and in that case remove it because it is |
897 | * most likely another host that got disconnected. | |
898 | */ | |
899 | xd = tb_xdomain_find_by_link_depth(tb, link, depth); | |
900 | if (!xd) { | |
901 | u8 dual_link; | |
902 | ||
903 | dual_link = dual_link_from_link(link); | |
904 | if (dual_link) | |
905 | xd = tb_xdomain_find_by_link_depth(tb, dual_link, | |
906 | depth); | |
907 | } | |
908 | if (xd) { | |
909 | remove_xdomain(xd); | |
910 | tb_xdomain_put(xd); | |
911 | } | |
912 | ||
913 | /* | |
914 | * If the user disconnected a switch during suspend and | |
915 | * connected another host to the same port, remove the switch | |
916 | * first. | |
917 | */ | |
8f965efd MW |
918 | sw = tb_switch_find_by_route(tb, route); |
919 | if (sw) { | |
d1ff7024 | 920 | remove_switch(sw); |
8f965efd MW |
921 | tb_switch_put(sw); |
922 | } | |
d1ff7024 MW |
923 | |
924 | sw = tb_switch_find_by_link_depth(tb, link, depth); | |
925 | if (!sw) { | |
926 | tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, | |
927 | depth); | |
928 | return; | |
929 | } | |
930 | ||
ee487dd2 MW |
931 | add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link, |
932 | depth); | |
d1ff7024 MW |
933 | tb_switch_put(sw); |
934 | } | |
935 | ||
936 | static void | |
937 | icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) | |
938 | { | |
939 | const struct icm_fr_event_xdomain_disconnected *pkg = | |
940 | (const struct icm_fr_event_xdomain_disconnected *)hdr; | |
941 | struct tb_xdomain *xd; | |
942 | ||
943 | /* | |
944 | * If the connection is through one or multiple devices, the | |
945 | * XDomain device is removed along with them so it is fine if we | |
946 | * cannot find it here. | |
947 | */ | |
948 | xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); | |
949 | if (xd) { | |
950 | remove_xdomain(xd); | |
951 | tb_xdomain_put(xd); | |
952 | } | |
953 | } | |
954 | ||
0d53827d MW |
955 | static int icm_tr_cio_reset(struct tb *tb) |
956 | { | |
957 | return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1)); | |
958 | } | |
959 | ||
4bac471d RM |
960 | static int |
961 | icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, | |
2d8ff0b5 | 962 | size_t *nboot_acl, bool *rpm) |
4bac471d RM |
963 | { |
964 | struct icm_tr_pkg_driver_ready_response reply; | |
965 | struct icm_pkg_driver_ready request = { | |
966 | .hdr.code = ICM_DRIVER_READY, | |
967 | }; | |
968 | int ret; | |
969 | ||
970 | memset(&reply, 0, sizeof(reply)); | |
971 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
972 | 1, 20000); | |
973 | if (ret) | |
974 | return ret; | |
975 | ||
976 | if (security_level) | |
977 | *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK; | |
978 | if (nboot_acl) | |
979 | *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >> | |
980 | ICM_TR_INFO_BOOT_ACL_SHIFT; | |
2d8ff0b5 MW |
981 | if (rpm) |
982 | *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3); | |
983 | ||
4bac471d RM |
984 | return 0; |
985 | } | |
986 | ||
987 | static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw) | |
988 | { | |
989 | struct icm_tr_pkg_approve_device request; | |
990 | struct icm_tr_pkg_approve_device reply; | |
991 | int ret; | |
992 | ||
993 | memset(&request, 0, sizeof(request)); | |
994 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
995 | request.hdr.code = ICM_APPROVE_DEVICE; | |
996 | request.route_lo = sw->config.route_lo; | |
997 | request.route_hi = sw->config.route_hi; | |
998 | request.connection_id = sw->connection_id; | |
999 | ||
1000 | memset(&reply, 0, sizeof(reply)); | |
1001 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1002 | 1, ICM_APPROVE_TIMEOUT); | |
1003 | if (ret) | |
1004 | return ret; | |
1005 | ||
1006 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { | |
1007 | tb_warn(tb, "PCIe tunnel creation failed\n"); | |
1008 | return -EIO; | |
1009 | } | |
1010 | ||
1011 | return 0; | |
1012 | } | |
1013 | ||
1014 | static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw) | |
1015 | { | |
1016 | struct icm_tr_pkg_add_device_key_response reply; | |
1017 | struct icm_tr_pkg_add_device_key request; | |
1018 | int ret; | |
1019 | ||
1020 | memset(&request, 0, sizeof(request)); | |
1021 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
1022 | request.hdr.code = ICM_ADD_DEVICE_KEY; | |
1023 | request.route_lo = sw->config.route_lo; | |
1024 | request.route_hi = sw->config.route_hi; | |
1025 | request.connection_id = sw->connection_id; | |
1026 | memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); | |
1027 | ||
1028 | memset(&reply, 0, sizeof(reply)); | |
1029 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1030 | 1, ICM_TIMEOUT); | |
1031 | if (ret) | |
1032 | return ret; | |
1033 | ||
1034 | if (reply.hdr.flags & ICM_FLAGS_ERROR) { | |
1035 | tb_warn(tb, "Adding key to switch failed\n"); | |
1036 | return -EIO; | |
1037 | } | |
1038 | ||
1039 | return 0; | |
1040 | } | |
1041 | ||
1042 | static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, | |
1043 | const u8 *challenge, u8 *response) | |
1044 | { | |
1045 | struct icm_tr_pkg_challenge_device_response reply; | |
1046 | struct icm_tr_pkg_challenge_device request; | |
1047 | int ret; | |
1048 | ||
1049 | memset(&request, 0, sizeof(request)); | |
1050 | memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); | |
1051 | request.hdr.code = ICM_CHALLENGE_DEVICE; | |
1052 | request.route_lo = sw->config.route_lo; | |
1053 | request.route_hi = sw->config.route_hi; | |
1054 | request.connection_id = sw->connection_id; | |
1055 | memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); | |
1056 | ||
1057 | memset(&reply, 0, sizeof(reply)); | |
1058 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1059 | 1, ICM_TIMEOUT); | |
1060 | if (ret) | |
1061 | return ret; | |
1062 | ||
1063 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
1064 | return -EKEYREJECTED; | |
1065 | if (reply.hdr.flags & ICM_FLAGS_NO_KEY) | |
1066 | return -ENOKEY; | |
1067 | ||
1068 | memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); | |
1069 | ||
1070 | return 0; | |
1071 | } | |
1072 | ||
1073 | static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) | |
1074 | { | |
1075 | struct icm_tr_pkg_approve_xdomain_response reply; | |
1076 | struct icm_tr_pkg_approve_xdomain request; | |
1077 | int ret; | |
1078 | ||
1079 | memset(&request, 0, sizeof(request)); | |
1080 | request.hdr.code = ICM_APPROVE_XDOMAIN; | |
1081 | request.route_hi = upper_32_bits(xd->route); | |
1082 | request.route_lo = lower_32_bits(xd->route); | |
1083 | request.transmit_path = xd->transmit_path; | |
1084 | request.transmit_ring = xd->transmit_ring; | |
1085 | request.receive_path = xd->receive_path; | |
1086 | request.receive_ring = xd->receive_ring; | |
1087 | memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); | |
1088 | ||
1089 | memset(&reply, 0, sizeof(reply)); | |
1090 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1091 | 1, ICM_TIMEOUT); | |
1092 | if (ret) | |
1093 | return ret; | |
1094 | ||
1095 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
1096 | return -EIO; | |
1097 | ||
1098 | return 0; | |
1099 | } | |
1100 | ||
1101 | static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd, | |
1102 | int stage) | |
1103 | { | |
1104 | struct icm_tr_pkg_disconnect_xdomain_response reply; | |
1105 | struct icm_tr_pkg_disconnect_xdomain request; | |
1106 | int ret; | |
1107 | ||
1108 | memset(&request, 0, sizeof(request)); | |
1109 | request.hdr.code = ICM_DISCONNECT_XDOMAIN; | |
1110 | request.stage = stage; | |
1111 | request.route_hi = upper_32_bits(xd->route); | |
1112 | request.route_lo = lower_32_bits(xd->route); | |
1113 | memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); | |
1114 | ||
1115 | memset(&reply, 0, sizeof(reply)); | |
1116 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1117 | 1, ICM_TIMEOUT); | |
1118 | if (ret) | |
1119 | return ret; | |
1120 | ||
1121 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
1122 | return -EIO; | |
1123 | ||
1124 | return 0; | |
1125 | } | |
1126 | ||
1127 | static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) | |
1128 | { | |
1129 | int ret; | |
1130 | ||
1131 | ret = icm_tr_xdomain_tear_down(tb, xd, 1); | |
1132 | if (ret) | |
1133 | return ret; | |
1134 | ||
1135 | usleep_range(10, 50); | |
1136 | return icm_tr_xdomain_tear_down(tb, xd, 2); | |
1137 | } | |
1138 | ||
1139 | static void | |
3cdb9446 MW |
1140 | __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr, |
1141 | bool force_rtd3) | |
4bac471d RM |
1142 | { |
1143 | const struct icm_tr_event_device_connected *pkg = | |
1144 | (const struct icm_tr_event_device_connected *)hdr; | |
1145 | enum tb_security_level security_level; | |
1146 | struct tb_switch *sw, *parent_sw; | |
1147 | struct tb_xdomain *xd; | |
1148 | bool authorized, boot; | |
1149 | u64 route; | |
1150 | ||
3cdb9446 MW |
1151 | icm_postpone_rescan(tb); |
1152 | ||
4bac471d RM |
1153 | /* |
1154 | * Currently we don't use the QoS information coming with the | |
1155 | * device connected message so simply ignore that extra | |
1156 | * packet for now. | |
1157 | */ | |
1158 | if (pkg->hdr.packet_id) | |
1159 | return; | |
1160 | ||
4bac471d RM |
1161 | route = get_route(pkg->route_hi, pkg->route_lo); |
1162 | authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; | |
1163 | security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> | |
1164 | ICM_FLAGS_SLEVEL_SHIFT; | |
1165 | boot = pkg->link_info & ICM_LINK_INFO_BOOT; | |
1166 | ||
1167 | if (pkg->link_info & ICM_LINK_INFO_REJECTED) { | |
1168 | tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n", | |
1169 | route); | |
1170 | return; | |
1171 | } | |
1172 | ||
1173 | sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); | |
1174 | if (sw) { | |
1175 | /* Update the switch if it is still in the same place */ | |
1176 | if (tb_route(sw) == route && !!sw->authorized == authorized) { | |
1177 | parent_sw = tb_to_switch(sw->dev.parent); | |
1178 | update_switch(parent_sw, sw, route, pkg->connection_id, | |
1179 | 0, 0, 0, boot); | |
1180 | tb_switch_put(sw); | |
1181 | return; | |
1182 | } | |
1183 | ||
1184 | remove_switch(sw); | |
1185 | tb_switch_put(sw); | |
1186 | } | |
1187 | ||
1188 | /* Another switch with the same address */ | |
1189 | sw = tb_switch_find_by_route(tb, route); | |
1190 | if (sw) { | |
1191 | remove_switch(sw); | |
1192 | tb_switch_put(sw); | |
1193 | } | |
1194 | ||
1195 | /* XDomain connection with the same address */ | |
1196 | xd = tb_xdomain_find_by_route(tb, route); | |
1197 | if (xd) { | |
1198 | remove_xdomain(xd); | |
1199 | tb_xdomain_put(xd); | |
1200 | } | |
1201 | ||
1202 | parent_sw = tb_switch_find_by_route(tb, get_parent_route(route)); | |
1203 | if (!parent_sw) { | |
1204 | tb_err(tb, "failed to find parent switch for %llx\n", route); | |
1205 | return; | |
1206 | } | |
1207 | ||
3cdb9446 MW |
1208 | sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, |
1209 | sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0, | |
1210 | security_level, authorized, boot); | |
1211 | if (!IS_ERR(sw) && force_rtd3) | |
1212 | sw->rpm = true; | |
4bac471d RM |
1213 | |
1214 | tb_switch_put(parent_sw); | |
1215 | } | |
1216 | ||
3cdb9446 MW |
1217 | static void |
1218 | icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |
1219 | { | |
1220 | __icm_tr_device_connected(tb, hdr, false); | |
1221 | } | |
1222 | ||
4bac471d RM |
1223 | static void |
1224 | icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) | |
1225 | { | |
1226 | const struct icm_tr_event_device_disconnected *pkg = | |
1227 | (const struct icm_tr_event_device_disconnected *)hdr; | |
1228 | struct tb_switch *sw; | |
1229 | u64 route; | |
1230 | ||
1231 | route = get_route(pkg->route_hi, pkg->route_lo); | |
1232 | ||
1233 | sw = tb_switch_find_by_route(tb, route); | |
1234 | if (!sw) { | |
1235 | tb_warn(tb, "no switch exists at %llx, ignoring\n", route); | |
1236 | return; | |
1237 | } | |
1238 | ||
1239 | remove_switch(sw); | |
1240 | tb_switch_put(sw); | |
1241 | } | |
1242 | ||
1243 | static void | |
1244 | icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |
1245 | { | |
1246 | const struct icm_tr_event_xdomain_connected *pkg = | |
1247 | (const struct icm_tr_event_xdomain_connected *)hdr; | |
1248 | struct tb_xdomain *xd; | |
1249 | struct tb_switch *sw; | |
1250 | u64 route; | |
1251 | ||
1252 | if (!tb->root_switch) | |
1253 | return; | |
1254 | ||
1255 | route = get_route(pkg->local_route_hi, pkg->local_route_lo); | |
1256 | ||
1257 | xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); | |
1258 | if (xd) { | |
1259 | if (xd->route == route) { | |
1260 | update_xdomain(xd, route, 0); | |
1261 | tb_xdomain_put(xd); | |
1262 | return; | |
1263 | } | |
1264 | ||
1265 | remove_xdomain(xd); | |
1266 | tb_xdomain_put(xd); | |
1267 | } | |
1268 | ||
1269 | /* An existing xdomain with the same address */ | |
1270 | xd = tb_xdomain_find_by_route(tb, route); | |
1271 | if (xd) { | |
1272 | remove_xdomain(xd); | |
1273 | tb_xdomain_put(xd); | |
1274 | } | |
1275 | ||
1276 | /* | |
1277 | * If the user disconnected a switch during suspend and | |
1278 | * connected another host to the same port, remove the switch | |
1279 | * first. | |
1280 | */ | |
8f965efd MW |
1281 | sw = tb_switch_find_by_route(tb, route); |
1282 | if (sw) { | |
4bac471d | 1283 | remove_switch(sw); |
8f965efd MW |
1284 | tb_switch_put(sw); |
1285 | } | |
4bac471d RM |
1286 | |
1287 | sw = tb_switch_find_by_route(tb, get_parent_route(route)); | |
1288 | if (!sw) { | |
1289 | tb_warn(tb, "no switch exists at %llx, ignoring\n", route); | |
1290 | return; | |
1291 | } | |
1292 | ||
1293 | add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0); | |
1294 | tb_switch_put(sw); | |
1295 | } | |
1296 | ||
1297 | static void | |
1298 | icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) | |
1299 | { | |
1300 | const struct icm_tr_event_xdomain_disconnected *pkg = | |
1301 | (const struct icm_tr_event_xdomain_disconnected *)hdr; | |
1302 | struct tb_xdomain *xd; | |
1303 | u64 route; | |
1304 | ||
1305 | route = get_route(pkg->route_hi, pkg->route_lo); | |
1306 | ||
1307 | xd = tb_xdomain_find_by_route(tb, route); | |
1308 | if (xd) { | |
1309 | remove_xdomain(xd); | |
1310 | tb_xdomain_put(xd); | |
1311 | } | |
1312 | } | |
1313 | ||
f67cf491 MW |
1314 | static struct pci_dev *get_upstream_port(struct pci_dev *pdev) |
1315 | { | |
1316 | struct pci_dev *parent; | |
1317 | ||
1318 | parent = pci_upstream_bridge(pdev); | |
1319 | while (parent) { | |
1320 | if (!pci_is_pcie(parent)) | |
1321 | return NULL; | |
1322 | if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) | |
1323 | break; | |
1324 | parent = pci_upstream_bridge(parent); | |
1325 | } | |
1326 | ||
1327 | if (!parent) | |
1328 | return NULL; | |
1329 | ||
1330 | switch (parent->device) { | |
1331 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: | |
1332 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: | |
1333 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: | |
1334 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: | |
1335 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: | |
c4630d6a MW |
1336 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: |
1337 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: | |
f67cf491 MW |
1338 | return parent; |
1339 | } | |
1340 | ||
1341 | return NULL; | |
1342 | } | |
1343 | ||
1344 | static bool icm_ar_is_supported(struct tb *tb) | |
1345 | { | |
1346 | struct pci_dev *upstream_port; | |
1347 | struct icm *icm = tb_priv(tb); | |
1348 | ||
1349 | /* | |
1350 | * Starting from Alpine Ridge we can use ICM on Apple machines | |
1351 | * as well. We just need to reset and re-enable it first. | |
1352 | */ | |
630b3aff | 1353 | if (!x86_apple_machine) |
f67cf491 MW |
1354 | return true; |
1355 | ||
1356 | /* | |
1357 | * Find the upstream PCIe port in case we need to do a reset |
1358 | * through its vendor-specific registers. |
1359 | */ | |
1360 | upstream_port = get_upstream_port(tb->nhi->pdev); | |
1361 | if (upstream_port) { | |
1362 | int cap; | |
1363 | ||
1364 | cap = pci_find_ext_capability(upstream_port, | |
1365 | PCI_EXT_CAP_ID_VNDR); | |
1366 | if (cap > 0) { | |
1367 | icm->upstream_port = upstream_port; | |
1368 | icm->vnd_cap = cap; | |
1369 | ||
1370 | return true; | |
1371 | } | |
1372 | } | |
1373 | ||
1374 | return false; | |
1375 | } | |
1376 | ||
0d53827d MW |
1377 | static int icm_ar_cio_reset(struct tb *tb) |
1378 | { | |
1379 | return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9)); | |
1380 | } | |
1381 | ||
f67cf491 MW |
1382 | static int icm_ar_get_mode(struct tb *tb) |
1383 | { | |
1384 | struct tb_nhi *nhi = tb->nhi; | |
e4be8c9b | 1385 | int retries = 60; |
f67cf491 MW |
1386 | u32 val; |
1387 | ||
1388 | do { | |
1389 | val = ioread32(nhi->iobase + REG_FW_STS); | |
1390 | if (val & REG_FW_STS_NVM_AUTH_DONE) | |
1391 | break; | |
e4be8c9b | 1392 | msleep(50); |
f67cf491 MW |
1393 | } while (--retries); |
1394 | ||
1395 | if (!retries) { | |
1396 | dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n"); | |
1397 | return -ENODEV; | |
1398 | } | |
1399 | ||
1400 | return nhi_mailbox_mode(nhi); | |
1401 | } | |
1402 | ||
9aaa3b8b MW |
1403 | static int |
1404 | icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level, | |
2d8ff0b5 | 1405 | size_t *nboot_acl, bool *rpm) |
9aaa3b8b MW |
1406 | { |
1407 | struct icm_ar_pkg_driver_ready_response reply; | |
1408 | struct icm_pkg_driver_ready request = { | |
1409 | .hdr.code = ICM_DRIVER_READY, | |
1410 | }; | |
1411 | int ret; | |
1412 | ||
1413 | memset(&reply, 0, sizeof(reply)); | |
1414 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1415 | 1, ICM_TIMEOUT); | |
1416 | if (ret) | |
1417 | return ret; | |
1418 | ||
1419 | if (security_level) | |
1420 | *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK; | |
1421 | if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED)) | |
1422 | *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >> | |
1423 | ICM_AR_INFO_BOOT_ACL_SHIFT; | |
2d8ff0b5 MW |
1424 | if (rpm) |
1425 | *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3); | |
1426 | ||
9aaa3b8b MW |
1427 | return 0; |
1428 | } | |
1429 | ||
f67cf491 MW |
1430 | static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) |
1431 | { | |
1432 | struct icm_ar_pkg_get_route_response reply; | |
1433 | struct icm_ar_pkg_get_route request = { | |
1434 | .hdr = { .code = ICM_GET_ROUTE }, | |
1435 | .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link, | |
1436 | }; | |
1437 | int ret; | |
1438 | ||
1439 | memset(&reply, 0, sizeof(reply)); | |
1440 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1441 | 1, ICM_TIMEOUT); | |
1442 | if (ret) | |
1443 | return ret; | |
1444 | ||
1445 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
1446 | return -EIO; | |
1447 | ||
1448 | *route = get_route(reply.route_hi, reply.route_lo); | |
1449 | return 0; | |
1450 | } | |
1451 | ||
9aaa3b8b MW |
1452 | static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids) |
1453 | { | |
1454 | struct icm_ar_pkg_preboot_acl_response reply; | |
1455 | struct icm_ar_pkg_preboot_acl request = { | |
1456 | .hdr = { .code = ICM_PREBOOT_ACL }, | |
1457 | }; | |
1458 | int ret, i; | |
1459 | ||
1460 | memset(&reply, 0, sizeof(reply)); | |
1461 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1462 | 1, ICM_TIMEOUT); | |
1463 | if (ret) | |
1464 | return ret; | |
1465 | ||
1466 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
1467 | return -EIO; | |
1468 | ||
1469 | for (i = 0; i < nuuids; i++) { | |
1470 | u32 *uuid = (u32 *)&uuids[i]; | |
1471 | ||
1472 | uuid[0] = reply.acl[i].uuid_lo; | |
1473 | uuid[1] = reply.acl[i].uuid_hi; | |
1474 | ||
1475 | if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) { | |
1476 | /* Map empty entries to null UUID */ | |
1477 | uuid[0] = 0; | |
1478 | uuid[1] = 0; | |
dd010bd7 | 1479 | } else if (uuid[0] != 0 || uuid[1] != 0) { |
9aaa3b8b MW |
1480 | /* Upper two DWs are always ones */ |
1481 | uuid[2] = 0xffffffff; | |
1482 | uuid[3] = 0xffffffff; | |
1483 | } | |
1484 | } | |
1485 | ||
1486 | return ret; | |
1487 | } | |
1488 | ||
1489 | static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids, | |
1490 | size_t nuuids) | |
1491 | { | |
1492 | struct icm_ar_pkg_preboot_acl_response reply; | |
1493 | struct icm_ar_pkg_preboot_acl request = { | |
1494 | .hdr = { | |
1495 | .code = ICM_PREBOOT_ACL, | |
1496 | .flags = ICM_FLAGS_WRITE, | |
1497 | }, | |
1498 | }; | |
1499 | int ret, i; | |
1500 | ||
1501 | for (i = 0; i < nuuids; i++) { | |
1502 | const u32 *uuid = (const u32 *)&uuids[i]; | |
1503 | ||
1504 | if (uuid_is_null(&uuids[i])) { | |
1505 | /* | |
1506 | * Map null UUID to the empty (all ones) entries | |
1507 | * for ICM. | |
1508 | */ | |
1509 | request.acl[i].uuid_lo = 0xffffffff; | |
1510 | request.acl[i].uuid_hi = 0xffffffff; | |
1511 | } else { | |
1512 | /* Two high DWs need to be set to all ones */ |
1513 | if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff) | |
1514 | return -EINVAL; | |
1515 | ||
1516 | request.acl[i].uuid_lo = uuid[0]; | |
1517 | request.acl[i].uuid_hi = uuid[1]; | |
1518 | } | |
1519 | } | |
1520 | ||
1521 | memset(&reply, 0, sizeof(reply)); | |
1522 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1523 | 1, ICM_TIMEOUT); | |
1524 | if (ret) | |
1525 | return ret; | |
1526 | ||
1527 | if (reply.hdr.flags & ICM_FLAGS_ERROR) | |
1528 | return -EIO; | |
1529 | ||
1530 | return 0; | |
1531 | } | |
1532 | ||
3cdb9446 MW |
1533 | static int |
1534 | icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, | |
1535 | size_t *nboot_acl, bool *rpm) | |
1536 | { | |
1537 | struct icm_tr_pkg_driver_ready_response reply; | |
1538 | struct icm_pkg_driver_ready request = { | |
1539 | .hdr.code = ICM_DRIVER_READY, | |
1540 | }; | |
1541 | int ret; | |
1542 | ||
1543 | memset(&reply, 0, sizeof(reply)); | |
1544 | ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), | |
1545 | 1, 20000); | |
1546 | if (ret) | |
1547 | return ret; | |
1548 | ||
1549 | /* Ice Lake always supports RTD3 */ | |
1550 | if (rpm) | |
1551 | *rpm = true; | |
1552 | ||
1553 | return 0; | |
1554 | } | |
1555 | ||
1556 | static void icm_icl_set_uuid(struct tb *tb) | |
1557 | { | |
1558 | struct tb_nhi *nhi = tb->nhi; | |
1559 | u32 uuid[4]; | |
1560 | ||
1561 | pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]); | |
1562 | pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]); | |
1563 | uuid[2] = 0xffffffff; | |
1564 | uuid[3] = 0xffffffff; | |
1565 | ||
1566 | tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); | |
1567 | } | |
1568 | ||
1569 | static void | |
1570 | icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |
1571 | { | |
1572 | __icm_tr_device_connected(tb, hdr, true); | |
1573 | } | |
1574 | ||
1575 | static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr) | |
1576 | { | |
1577 | const struct icm_icl_event_rtd3_veto *pkg = | |
1578 | (const struct icm_icl_event_rtd3_veto *)hdr; | |
1579 | ||
1580 | tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason); | |
1581 | ||
1582 | if (pkg->veto_reason) | |
1583 | icm_veto_begin(tb); | |
1584 | else | |
1585 | icm_veto_end(tb); | |
1586 | } | |
1587 | ||
f67cf491 MW |
1588 | static void icm_handle_notification(struct work_struct *work) |
1589 | { | |
1590 | struct icm_notification *n = container_of(work, typeof(*n), work); | |
1591 | struct tb *tb = n->tb; | |
1592 | struct icm *icm = tb_priv(tb); | |
1593 | ||
1594 | mutex_lock(&tb->lock); | |
1595 | ||
86da809d MW |
1596 | /* |
1597 | * When the domain is stopped we flush its workqueue but before | |
1598 | * that the root switch is removed. In that case we should treat | |
1599 | * the queued events as being canceled. | |
1600 | */ | |
1601 | if (tb->root_switch) { | |
1602 | switch (n->pkg->code) { | |
1603 | case ICM_EVENT_DEVICE_CONNECTED: | |
1604 | icm->device_connected(tb, n->pkg); | |
1605 | break; | |
1606 | case ICM_EVENT_DEVICE_DISCONNECTED: | |
1607 | icm->device_disconnected(tb, n->pkg); | |
1608 | break; | |
1609 | case ICM_EVENT_XDOMAIN_CONNECTED: | |
1610 | icm->xdomain_connected(tb, n->pkg); | |
1611 | break; | |
1612 | case ICM_EVENT_XDOMAIN_DISCONNECTED: | |
1613 | icm->xdomain_disconnected(tb, n->pkg); | |
1614 | break; | |
3cdb9446 MW |
1615 | case ICM_EVENT_RTD3_VETO: |
1616 | icm->rtd3_veto(tb, n->pkg); | |
1617 | break; | |
86da809d | 1618 | } |
f67cf491 MW |
1619 | } |
1620 | ||
1621 | mutex_unlock(&tb->lock); | |
1622 | ||
1623 | kfree(n->pkg); | |
1624 | kfree(n); | |
1625 | } | |
1626 | ||
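| /* | |
| * Event callback from the control channel: copy the packet and queue | |
| * a work item so the notification is handled in process context. | |
| */ | |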
1627 | static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, | |
1628 | const void *buf, size_t size) | |
1629 | { | |
1630 | struct icm_notification *n; | |
1631 | ||
1632 | n = kmalloc(sizeof(*n), GFP_KERNEL); | |
1633 | if (!n) | |
1634 | return; | |
1635 | ||
1636 | INIT_WORK(&n->work, icm_handle_notification); | |
1637 | n->pkg = kmemdup(buf, size, GFP_KERNEL); | |
| if (!n->pkg) { | |
| kfree(n); | |
| return; | |
| } | |
1638 | n->tb = tb; | |
1639 | ||
1640 | queue_work(tb->wq, &n->work); | |
1641 | } | |
1642 | ||
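| /* | |
| * Common driver ready handshake: send the driver ready message and | |
| * then poll until the root switch config space becomes readable. | |
| */ | |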
1643 | static int | |
9aaa3b8b | 1644 | __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level, |
2d8ff0b5 | 1645 | size_t *nboot_acl, bool *rpm) |
f67cf491 | 1646 | { |
3080e197 | 1647 | struct icm *icm = tb_priv(tb); |
44b51bbb | 1648 | unsigned int retries = 50; |
f67cf491 MW |
1649 | int ret; |
1650 | ||
2d8ff0b5 | 1651 | ret = icm->driver_ready(tb, security_level, nboot_acl, rpm); |
3080e197 MW |
1652 | if (ret) { |
1653 | tb_err(tb, "failed to send driver ready to ICM\n"); | |
f67cf491 | 1654 | return ret; |
3080e197 | 1655 | } |
f67cf491 MW |
1656 | |
1657 | /* | |
1658 | * Hold on here until the switch config space is accessible so | |
1659 | * that we can read root switch config successfully. | |
1660 | */ | |
1661 | do { | |
1662 | struct tb_cfg_result res; | |
1663 | u32 tmp; | |
1664 | ||
1665 | res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH, | |
1666 | 0, 1, 100); | |
1667 | if (!res.err) | |
1668 | return 0; | |
1669 | ||
1670 | msleep(50); | |
1671 | } while (--retries); | |
1672 | ||
44b51bbb | 1673 | tb_err(tb, "failed to read root switch config space, giving up\n"); |
f67cf491 MW |
1674 | return -ETIMEDOUT; |
1675 | } | |
1676 | ||
f67cf491 MW |
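| /* | |
| * Set the ARC to wait for a CIO reset event, re-enable the ICM CPU | |
| * and then trigger the controller specific CIO reset. | |
| */ | |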
1677 | static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) |
1678 | { | |
1679 | struct icm *icm = tb_priv(tb); | |
1680 | u32 val; | |
1681 | ||
ea9d7bb7 MW |
1682 | if (!icm->upstream_port) |
1683 | return -ENODEV; | |
1684 | ||
f67cf491 MW |
1685 | /* Make ARC wait for the CIO reset event to happen */ | |
1686 | val = ioread32(nhi->iobase + REG_FW_STS); | |
1687 | val |= REG_FW_STS_CIO_RESET_REQ; | |
1688 | iowrite32(val, nhi->iobase + REG_FW_STS); | |
1689 | ||
1690 | /* Re-start ARC */ | |
1691 | val = ioread32(nhi->iobase + REG_FW_STS); | |
1692 | val |= REG_FW_STS_ICM_EN_INVERT; | |
1693 | val |= REG_FW_STS_ICM_EN_CPU; | |
1694 | iowrite32(val, nhi->iobase + REG_FW_STS); | |
1695 | ||
1696 | /* Trigger CIO reset now */ | |
0d53827d | 1697 | return icm->cio_reset(tb); |
f67cf491 MW |
1698 | } |
1699 | ||
1700 | static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi) | |
1701 | { | |
1702 | unsigned int retries = 10; | |
1703 | int ret; | |
1704 | u32 val; | |
1705 | ||
1706 | /* Check if the ICM firmware is already running */ | |
1707 | val = ioread32(nhi->iobase + REG_FW_STS); | |
1708 | if (val & REG_FW_STS_ICM_EN) | |
1709 | return 0; | |
1710 | ||
62efe699 | 1711 | dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n"); |
f67cf491 MW |
1712 | |
1713 | ret = icm_firmware_reset(tb, nhi); | |
1714 | if (ret) | |
1715 | return ret; | |
1716 | ||
1717 | /* Wait until the ICM firmware tells us it is up and running */ | |
1718 | do { | |
1719 | /* Check that the ICM firmware is running */ | |
1720 | val = ioread32(nhi->iobase + REG_FW_STS); | |
1721 | if (val & REG_FW_STS_NVM_AUTH_DONE) | |
1722 | return 0; | |
1723 | ||
1724 | msleep(300); | |
1725 | } while (--retries); | |
1726 | ||
1727 | return -ETIMEDOUT; | |
1728 | } | |
1729 | ||
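| /* | |
| * If both null ports of the given physical port are up, disable and | |
| * then re-enable their links through the PCIe2CIO mailbox. | |
| */ | |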
1730 | static int icm_reset_phy_port(struct tb *tb, int phy_port) | |
1731 | { | |
1732 | struct icm *icm = tb_priv(tb); | |
1733 | u32 state0, state1; | |
1734 | int port0, port1; | |
1735 | u32 val0, val1; | |
1736 | int ret; | |
1737 | ||
1738 | if (!icm->upstream_port) | |
1739 | return 0; | |
1740 | ||
1741 | if (phy_port) { | |
1742 | port0 = 3; | |
1743 | port1 = 4; | |
1744 | } else { | |
1745 | port0 = 1; | |
1746 | port1 = 2; | |
1747 | } | |
1748 | ||
1749 | /* | |
1750 | * Read link status of both null ports belonging to a single | |
1751 | * physical port. | |
1752 | */ | |
1753 | ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); | |
1754 | if (ret) | |
1755 | return ret; | |
1756 | ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); | |
1757 | if (ret) | |
1758 | return ret; | |
1759 | ||
1760 | state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK; | |
1761 | state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; | |
1762 | state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK; | |
1763 | state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; | |
1764 | ||
1765 | /* If they are both up we need to reset them now */ | |
1766 | if (state0 != TB_PORT_UP || state1 != TB_PORT_UP) | |
1767 | return 0; | |
1768 | ||
1769 | val0 |= PHY_PORT_CS1_LINK_DISABLE; | |
1770 | ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); | |
1771 | if (ret) | |
1772 | return ret; | |
1773 | ||
1774 | val1 |= PHY_PORT_CS1_LINK_DISABLE; | |
1775 | ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); | |
1776 | if (ret) | |
1777 | return ret; | |
1778 | ||
1779 | /* Wait a bit and then re-enable both ports */ | |
1780 | usleep_range(10, 100); | |
1781 | ||
1782 | ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); | |
1783 | if (ret) | |
1784 | return ret; | |
1785 | ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); | |
1786 | if (ret) | |
1787 | return ret; | |
1788 | ||
1789 | val0 &= ~PHY_PORT_CS1_LINK_DISABLE; | |
1790 | ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); | |
1791 | if (ret) | |
1792 | return ret; | |
1793 | ||
1794 | val1 &= ~PHY_PORT_CS1_LINK_DISABLE; | |
1795 | return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); | |
1796 | } | |
1797 | ||
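| /* | |
| * Start the ICM firmware if it is not already running, handle safe | |
| * mode and CM mode, and reset any physical ports that already have | |
| * something connected to them. | |
| */ | |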
1798 | static int icm_firmware_init(struct tb *tb) | |
1799 | { | |
1800 | struct icm *icm = tb_priv(tb); | |
1801 | struct tb_nhi *nhi = tb->nhi; | |
1802 | int ret; | |
1803 | ||
1804 | ret = icm_firmware_start(tb, nhi); | |
1805 | if (ret) { | |
1806 | dev_err(&nhi->pdev->dev, "could not start ICM firmware\n"); | |
1807 | return ret; | |
1808 | } | |
1809 | ||
1810 | if (icm->get_mode) { | |
1811 | ret = icm->get_mode(tb); | |
1812 | ||
1813 | switch (ret) { | |
e6b245cc MW |
1814 | case NHI_FW_SAFE_MODE: |
1815 | icm->safe_mode = true; | |
1816 | break; | |
1817 | ||
f67cf491 MW |
1818 | case NHI_FW_CM_MODE: |
1819 | /* Ask ICM to accept all Thunderbolt devices */ | |
1820 | nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0); | |
1821 | break; | |
1822 | ||
1823 | default: | |
e4be8c9b MW |
1824 | if (ret < 0) |
1825 | return ret; | |
1826 | ||
f67cf491 MW |
1827 | tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret); |
1828 | return -ENODEV; | |
1829 | } | |
1830 | } | |
1831 | ||
1832 | /* | |
1833 | * Reset both physical ports if there is anything connected to | |
1834 | * them already. | |
1835 | */ | |
1836 | ret = icm_reset_phy_port(tb, 0); | |
1837 | if (ret) | |
1838 | dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n"); | |
1839 | ret = icm_reset_phy_port(tb, 1); | |
1840 | if (ret) | |
1841 | dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n"); | |
1842 | ||
1843 | return 0; | |
1844 | } | |
1845 | ||
1846 | static int icm_driver_ready(struct tb *tb) | |
1847 | { | |
e6b245cc | 1848 | struct icm *icm = tb_priv(tb); |
f67cf491 MW |
1849 | int ret; |
1850 | ||
1851 | ret = icm_firmware_init(tb); | |
1852 | if (ret) | |
1853 | return ret; | |
1854 | ||
e6b245cc MW |
1855 | if (icm->safe_mode) { |
1856 | tb_info(tb, "Thunderbolt host controller is in safe mode.\n"); | |
1857 | tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n"); | |
1858 | tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n"); | |
1859 | return 0; | |
1860 | } | |
1861 | ||
2d8ff0b5 MW |
1862 | ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl, |
1863 | &icm->rpm); | |
9aaa3b8b MW |
1864 | if (ret) |
1865 | return ret; | |
1866 | ||
1867 | /* | |
1868 | * Make sure the number of supported preboot ACL entries matches | |
1869 | * what we expect, or disable the whole feature. | |
1870 | */ | |
1871 | if (tb->nboot_acl > icm->max_boot_acl) | |
1872 | tb->nboot_acl = 0; | |
1873 | ||
1874 | return 0; | |
f67cf491 MW |
1875 | } |
1876 | ||
1877 | static int icm_suspend(struct tb *tb) | |
1878 | { | |
d04522fa | 1879 | struct icm *icm = tb_priv(tb); |
a684c5b1 | 1880 | |
d04522fa MW |
1881 | if (icm->save_devices) |
1882 | icm->save_devices(tb); | |
a684c5b1 | 1883 | |
d04522fa | 1884 | nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); |
a684c5b1 | 1885 | return 0; |
f67cf491 MW |
1886 | } |
1887 | ||
1888 | /* | |
1889 | * Mark all switches (except the root switch) below this one unplugged. The | |
1890 | * ICM firmware will send us an updated list of switches after we have sent | |
1891 | * it the driver ready command. If a switch is not in that list it will be | |
1892 | * removed when we perform the rescan. | |
1893 | */ | |
1894 | static void icm_unplug_children(struct tb_switch *sw) | |
1895 | { | |
1896 | unsigned int i; | |
1897 | ||
1898 | if (tb_route(sw)) | |
1899 | sw->is_unplugged = true; | |
1900 | ||
1901 | for (i = 1; i <= sw->config.max_port_number; i++) { | |
1902 | struct tb_port *port = &sw->ports[i]; | |
1903 | ||
dfe40ca4 | 1904 | if (port->xdomain) |
d1ff7024 | 1905 | port->xdomain->is_unplugged = true; |
dfe40ca4 MW |
1906 | else if (tb_port_has_remote(port)) |
1907 | icm_unplug_children(port->remote->sw); | |
f67cf491 MW |
1908 | } |
1909 | } | |
1910 | ||
4f7c2e0d MW |
1911 | static int complete_rpm(struct device *dev, void *data) |
1912 | { | |
1913 | struct tb_switch *sw = tb_to_switch(dev); | |
1914 | ||
1915 | if (sw) | |
1916 | complete(&sw->rpm_complete); | |
1917 | return 0; | |
1918 | } | |
1919 | ||
1920 | static void remove_unplugged_switch(struct tb_switch *sw) | |
1921 | { | |
1922 | pm_runtime_get_sync(sw->dev.parent); | |
1923 | ||
1924 | /* | |
1925 | * Signal rpm_complete for this switch and the switches below it, | |
1926 | * because tb_switch_remove() calls pm_runtime_get_sync() which then | |
1927 | * waits for it. | |
1928 | */ | |
1929 | complete_rpm(&sw->dev, NULL); | |
1930 | bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); | |
1931 | tb_switch_remove(sw); | |
1932 | ||
1933 | pm_runtime_mark_last_busy(sw->dev.parent); | |
1934 | pm_runtime_put_autosuspend(sw->dev.parent); | |
1935 | } | |
1936 | ||
f67cf491 MW |
1937 | static void icm_free_unplugged_children(struct tb_switch *sw) |
1938 | { | |
1939 | unsigned int i; | |
1940 | ||
1941 | for (i = 1; i <= sw->config.max_port_number; i++) { | |
1942 | struct tb_port *port = &sw->ports[i]; | |
1943 | ||
d1ff7024 MW |
1944 | if (port->xdomain && port->xdomain->is_unplugged) { |
1945 | tb_xdomain_remove(port->xdomain); | |
1946 | port->xdomain = NULL; | |
dfe40ca4 MW |
1947 | } else if (tb_port_has_remote(port)) { |
1948 | if (port->remote->sw->is_unplugged) { | |
4f7c2e0d | 1949 | remove_unplugged_switch(port->remote->sw); |
dfe40ca4 MW |
1950 | port->remote = NULL; |
1951 | } else { | |
1952 | icm_free_unplugged_children(port->remote->sw); | |
1953 | } | |
f67cf491 MW |
1954 | } |
1955 | } | |
1956 | } | |
1957 | ||
1958 | static void icm_rescan_work(struct work_struct *work) | |
1959 | { | |
1960 | struct icm *icm = container_of(work, struct icm, rescan_work.work); | |
1961 | struct tb *tb = icm_to_tb(icm); | |
1962 | ||
1963 | mutex_lock(&tb->lock); | |
1964 | if (tb->root_switch) | |
1965 | icm_free_unplugged_children(tb->root_switch); | |
1966 | mutex_unlock(&tb->lock); | |
1967 | } | |
1968 | ||
1969 | static void icm_complete(struct tb *tb) | |
1970 | { | |
1971 | struct icm *icm = tb_priv(tb); | |
1972 | ||
1973 | if (tb->nhi->going_away) | |
1974 | return; | |
1975 | ||
3cdb9446 MW |
1976 | /* |
1977 | * If RTD3 was vetoed before we entered system suspend, allow it | |
1978 | * again now before driver ready is sent. The firmware sends a new | |
1979 | * RTD3 veto if the veto is still in effect after it has received | |
1980 | * the driver ready command. | |
1981 | */ | |
1982 | icm_veto_end(tb); | |
f67cf491 MW |
1983 | icm_unplug_children(tb->root_switch); |
1984 | ||
1985 | /* | |
1986 | * Now all existing children should be resumed; start events | |
1987 | * from the ICM to get updated status. | |
1988 | */ | |
2d8ff0b5 | 1989 | __icm_driver_ready(tb, NULL, NULL, NULL); |
f67cf491 MW |
1990 | |
1991 | /* | |
1992 | * We do not get notifications of devices that have been | |
1993 | * unplugged during suspend, so schedule a rescan to clean them | |
1994 | * up, if any. | |
1995 | */ | |
1996 | queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500)); | |
1997 | } | |
1998 | ||
2d8ff0b5 MW |
1999 | static int icm_runtime_suspend(struct tb *tb) |
2000 | { | |
2001 | nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); | |
2002 | return 0; | |
2003 | } | |
2004 | ||
4f7c2e0d MW |
2005 | static int icm_runtime_suspend_switch(struct tb_switch *sw) |
2006 | { | |
2007 | if (tb_route(sw)) | |
2008 | reinit_completion(&sw->rpm_complete); | |
2009 | return 0; | |
2010 | } | |
2011 | ||
2012 | static int icm_runtime_resume_switch(struct tb_switch *sw) | |
2013 | { | |
2014 | if (tb_route(sw)) { | |
2015 | if (!wait_for_completion_timeout(&sw->rpm_complete, | |
2016 | msecs_to_jiffies(500))) { | |
2017 | dev_dbg(&sw->dev, "runtime resuming timed out\n"); | |
2018 | } | |
2019 | } | |
2020 | return 0; | |
2021 | } | |
2022 | ||
2d8ff0b5 MW |
2023 | static int icm_runtime_resume(struct tb *tb) |
2024 | { | |
2025 | /* | |
2026 | * We can reuse the same resume functionality as with system | |
2027 | * suspend. | |
2028 | */ | |
2029 | icm_complete(tb); | |
2030 | return 0; | |
2031 | } | |
2032 | ||
f67cf491 MW |
2033 | static int icm_start(struct tb *tb) |
2034 | { | |
e6b245cc | 2035 | struct icm *icm = tb_priv(tb); |
f67cf491 MW |
2036 | int ret; |
2037 | ||
e6b245cc MW |
2038 | if (icm->safe_mode) |
2039 | tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0); | |
2040 | else | |
2041 | tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); | |
444ac384 MW |
2042 | if (IS_ERR(tb->root_switch)) |
2043 | return PTR_ERR(tb->root_switch); | |
f67cf491 | 2044 | |
f437c24b | 2045 | tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm; |
2d8ff0b5 | 2046 | tb->root_switch->rpm = icm->rpm; |
e6b245cc | 2047 | |
3cdb9446 MW |
2048 | if (icm->set_uuid) |
2049 | icm->set_uuid(tb); | |
2050 | ||
f67cf491 | 2051 | ret = tb_switch_add(tb->root_switch); |
d1ff7024 | 2052 | if (ret) { |
f67cf491 | 2053 | tb_switch_put(tb->root_switch); |
d1ff7024 MW |
2054 | tb->root_switch = NULL; |
2055 | } | |
f67cf491 MW |
2056 | |
2057 | return ret; | |
2058 | } | |
2059 | ||
2060 | static void icm_stop(struct tb *tb) | |
2061 | { | |
2062 | struct icm *icm = tb_priv(tb); | |
2063 | ||
2064 | cancel_delayed_work(&icm->rescan_work); | |
2065 | tb_switch_remove(tb->root_switch); | |
2066 | tb->root_switch = NULL; | |
2067 | nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); | |
2068 | } | |
2069 | ||
e6b245cc MW |
2070 | static int icm_disconnect_pcie_paths(struct tb *tb) |
2071 | { | |
2072 | return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0); | |
2073 | } | |
2074 | ||
9aaa3b8b | 2075 | /* Falcon Ridge */ |
f67cf491 MW |
2076 | static const struct tb_cm_ops icm_fr_ops = { |
2077 | .driver_ready = icm_driver_ready, | |
2078 | .start = icm_start, | |
2079 | .stop = icm_stop, | |
2080 | .suspend = icm_suspend, | |
2081 | .complete = icm_complete, | |
2082 | .handle_event = icm_handle_event, | |
2083 | .approve_switch = icm_fr_approve_switch, | |
2084 | .add_switch_key = icm_fr_add_switch_key, | |
2085 | .challenge_switch_key = icm_fr_challenge_switch_key, | |
e6b245cc | 2086 | .disconnect_pcie_paths = icm_disconnect_pcie_paths, |
d1ff7024 MW |
2087 | .approve_xdomain_paths = icm_fr_approve_xdomain_paths, |
2088 | .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, | |
f67cf491 MW |
2089 | }; |
2090 | ||
9aaa3b8b MW |
2091 | /* Alpine Ridge */ |
2092 | static const struct tb_cm_ops icm_ar_ops = { | |
2093 | .driver_ready = icm_driver_ready, | |
2094 | .start = icm_start, | |
2095 | .stop = icm_stop, | |
2096 | .suspend = icm_suspend, | |
2097 | .complete = icm_complete, | |
2d8ff0b5 MW |
2098 | .runtime_suspend = icm_runtime_suspend, |
2099 | .runtime_resume = icm_runtime_resume, | |
4f7c2e0d MW |
2100 | .runtime_suspend_switch = icm_runtime_suspend_switch, |
2101 | .runtime_resume_switch = icm_runtime_resume_switch, | |
9aaa3b8b MW |
2102 | .handle_event = icm_handle_event, |
2103 | .get_boot_acl = icm_ar_get_boot_acl, | |
2104 | .set_boot_acl = icm_ar_set_boot_acl, | |
2105 | .approve_switch = icm_fr_approve_switch, | |
2106 | .add_switch_key = icm_fr_add_switch_key, | |
2107 | .challenge_switch_key = icm_fr_challenge_switch_key, | |
2108 | .disconnect_pcie_paths = icm_disconnect_pcie_paths, | |
2109 | .approve_xdomain_paths = icm_fr_approve_xdomain_paths, | |
2110 | .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, | |
2111 | }; | |
2112 | ||
4bac471d RM |
2113 | /* Titan Ridge */ |
2114 | static const struct tb_cm_ops icm_tr_ops = { | |
2115 | .driver_ready = icm_driver_ready, | |
2116 | .start = icm_start, | |
2117 | .stop = icm_stop, | |
2118 | .suspend = icm_suspend, | |
2119 | .complete = icm_complete, | |
2d8ff0b5 MW |
2120 | .runtime_suspend = icm_runtime_suspend, |
2121 | .runtime_resume = icm_runtime_resume, | |
4f7c2e0d MW |
2122 | .runtime_suspend_switch = icm_runtime_suspend_switch, |
2123 | .runtime_resume_switch = icm_runtime_resume_switch, | |
4bac471d RM |
2124 | .handle_event = icm_handle_event, |
2125 | .get_boot_acl = icm_ar_get_boot_acl, | |
2126 | .set_boot_acl = icm_ar_set_boot_acl, | |
2127 | .approve_switch = icm_tr_approve_switch, | |
2128 | .add_switch_key = icm_tr_add_switch_key, | |
2129 | .challenge_switch_key = icm_tr_challenge_switch_key, | |
2130 | .disconnect_pcie_paths = icm_disconnect_pcie_paths, | |
2131 | .approve_xdomain_paths = icm_tr_approve_xdomain_paths, | |
2132 | .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, | |
2133 | }; | |
2134 | ||
3cdb9446 MW |
2135 | /* Ice Lake */ |
2136 | static const struct tb_cm_ops icm_icl_ops = { | |
2137 | .driver_ready = icm_driver_ready, | |
2138 | .start = icm_start, | |
2139 | .stop = icm_stop, | |
2140 | .complete = icm_complete, | |
2141 | .runtime_suspend = icm_runtime_suspend, | |
2142 | .runtime_resume = icm_runtime_resume, | |
2143 | .handle_event = icm_handle_event, | |
2144 | .approve_xdomain_paths = icm_tr_approve_xdomain_paths, | |
2145 | .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, | |
2146 | }; | |
2147 | ||
f67cf491 MW |
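| /* | |
| * Allocate the ICM domain and hook up the controller generation | |
| * specific callbacks based on the NHI PCI device ID. | |
| */ | |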
2148 | struct tb *icm_probe(struct tb_nhi *nhi) |
2149 | { | |
2150 | struct icm *icm; | |
2151 | struct tb *tb; | |
2152 | ||
2153 | tb = tb_domain_alloc(nhi, sizeof(struct icm)); | |
2154 | if (!tb) | |
2155 | return NULL; | |
2156 | ||
2157 | icm = tb_priv(tb); | |
2158 | INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work); | |
2159 | mutex_init(&icm->request_lock); | |
2160 | ||
2161 | switch (nhi->pdev->device) { | |
2162 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: | |
2163 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: | |
f437c24b | 2164 | icm->can_upgrade_nvm = true; |
f67cf491 MW |
2165 | icm->is_supported = icm_fr_is_supported; |
2166 | icm->get_route = icm_fr_get_route; | |
d04522fa | 2167 | icm->save_devices = icm_fr_save_devices; |
3080e197 | 2168 | icm->driver_ready = icm_fr_driver_ready; |
f67cf491 MW |
2169 | icm->device_connected = icm_fr_device_connected; |
2170 | icm->device_disconnected = icm_fr_device_disconnected; | |
d1ff7024 MW |
2171 | icm->xdomain_connected = icm_fr_xdomain_connected; |
2172 | icm->xdomain_disconnected = icm_fr_xdomain_disconnected; | |
f67cf491 MW |
2173 | tb->cm_ops = &icm_fr_ops; |
2174 | break; | |
2175 | ||
2176 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI: | |
2177 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI: | |
2178 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI: | |
2179 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI: | |
2180 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: | |
9aaa3b8b | 2181 | icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; |
f437c24b MW |
2182 | /* |
2183 | * NVM upgrade has not been tested on Apple systems and | |
2184 | * they don't provide images publicly either. To be on | |
2185 | * the safe side, prevent root switch NVM upgrade on Macs | |
2186 | * for now. | |
2187 | */ | |
2188 | icm->can_upgrade_nvm = !x86_apple_machine; | |
f67cf491 | 2189 | icm->is_supported = icm_ar_is_supported; |
0d53827d | 2190 | icm->cio_reset = icm_ar_cio_reset; |
f67cf491 MW |
2191 | icm->get_mode = icm_ar_get_mode; |
2192 | icm->get_route = icm_ar_get_route; | |
d04522fa | 2193 | icm->save_devices = icm_fr_save_devices; |
9aaa3b8b | 2194 | icm->driver_ready = icm_ar_driver_ready; |
f67cf491 MW |
2195 | icm->device_connected = icm_fr_device_connected; |
2196 | icm->device_disconnected = icm_fr_device_disconnected; | |
d1ff7024 MW |
2197 | icm->xdomain_connected = icm_fr_xdomain_connected; |
2198 | icm->xdomain_disconnected = icm_fr_xdomain_disconnected; | |
9aaa3b8b | 2199 | tb->cm_ops = &icm_ar_ops; |
f67cf491 | 2200 | break; |
4bac471d RM |
2201 | |
2202 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI: | |
2203 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI: | |
2204 | icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; | |
f437c24b | 2205 | icm->can_upgrade_nvm = !x86_apple_machine; |
4bac471d | 2206 | icm->is_supported = icm_ar_is_supported; |
0d53827d | 2207 | icm->cio_reset = icm_tr_cio_reset; |
4bac471d RM |
2208 | icm->get_mode = icm_ar_get_mode; |
2209 | icm->driver_ready = icm_tr_driver_ready; | |
2210 | icm->device_connected = icm_tr_device_connected; | |
2211 | icm->device_disconnected = icm_tr_device_disconnected; | |
2212 | icm->xdomain_connected = icm_tr_xdomain_connected; | |
2213 | icm->xdomain_disconnected = icm_tr_xdomain_disconnected; | |
2214 | tb->cm_ops = &icm_tr_ops; | |
2215 | break; | |
3cdb9446 MW |
2216 | |
2217 | case PCI_DEVICE_ID_INTEL_ICL_NHI0: | |
2218 | case PCI_DEVICE_ID_INTEL_ICL_NHI1: | |
2219 | icm->is_supported = icm_ar_is_supported; | |
2220 | icm->driver_ready = icm_icl_driver_ready; | |
2221 | icm->set_uuid = icm_icl_set_uuid; | |
2222 | icm->device_connected = icm_icl_device_connected; | |
2223 | icm->device_disconnected = icm_tr_device_disconnected; | |
2224 | icm->xdomain_connected = icm_tr_xdomain_connected; | |
2225 | icm->xdomain_disconnected = icm_tr_xdomain_disconnected; | |
2226 | icm->rtd3_veto = icm_icl_rtd3_veto; | |
2227 | tb->cm_ops = &icm_icl_ops; | |
2228 | break; | |
f67cf491 MW |
2229 | } |
2230 | ||
2231 | if (!icm->is_supported || !icm->is_supported(tb)) { | |
2232 | dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n"); | |
2233 | tb_domain_put(tb); | |
2234 | return NULL; | |
2235 | } | |
2236 | ||
2237 | return tb; | |
2238 | } |