Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
a25c8b2f | 2 | /* |
15c6784c | 3 | * Thunderbolt driver - switch/port utility functions |
a25c8b2f AN |
4 | * |
5 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | |
15c6784c | 6 | * Copyright (C) 2018, Intel Corporation |
a25c8b2f AN |
7 | */ |
8 | ||
9 | #include <linux/delay.h> | |
e6b245cc MW |
10 | #include <linux/idr.h> |
11 | #include <linux/nvmem-provider.h> | |
2d8ff0b5 | 12 | #include <linux/pm_runtime.h> |
e6b245cc | 13 | #include <linux/sizes.h> |
10fefe56 | 14 | #include <linux/slab.h> |
e6b245cc | 15 | #include <linux/vmalloc.h> |
a25c8b2f AN |
16 | |
17 | #include "tb.h" | |
18 | ||
f67cf491 MW |
19 | /* Switch authorization from userspace is serialized by this lock */ |
20 | static DEFINE_MUTEX(switch_lock); | |
21 | ||
e6b245cc MW |
22 | /* Switch NVM support */ |
23 | ||
24 | #define NVM_DEVID 0x05 | |
25 | #define NVM_VERSION 0x08 | |
26 | #define NVM_CSS 0x10 | |
27 | #define NVM_FLASH_SIZE 0x45 | |
28 | ||
29 | #define NVM_MIN_SIZE SZ_32K | |
30 | #define NVM_MAX_SIZE SZ_512K | |
31 | ||
32 | static DEFINE_IDA(nvm_ida); | |
33 | ||
34 | struct nvm_auth_status { | |
35 | struct list_head list; | |
7c39ffe7 | 36 | uuid_t uuid; |
e6b245cc MW |
37 | u32 status; |
38 | }; | |
39 | ||
40 | /* | |
41 | * Hold NVM authentication failure status per switch. This information |
42 | * needs to stay around even when the switch gets power cycled so we | |
43 | * keep it separately. | |
44 | */ | |
45 | static LIST_HEAD(nvm_auth_status_cache); | |
46 | static DEFINE_MUTEX(nvm_auth_status_lock); | |
47 | ||
48 | static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw) | |
49 | { | |
50 | struct nvm_auth_status *st; | |
51 | ||
52 | list_for_each_entry(st, &nvm_auth_status_cache, list) { | |
7c39ffe7 | 53 | if (uuid_equal(&st->uuid, sw->uuid)) |
e6b245cc MW |
54 | return st; |
55 | } | |
56 | ||
57 | return NULL; | |
58 | } | |
59 | ||
60 | static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status) | |
61 | { | |
62 | struct nvm_auth_status *st; | |
63 | ||
64 | mutex_lock(&nvm_auth_status_lock); | |
65 | st = __nvm_get_auth_status(sw); | |
66 | mutex_unlock(&nvm_auth_status_lock); | |
67 | ||
68 | *status = st ? st->status : 0; | |
69 | } | |
70 | ||
71 | static void nvm_set_auth_status(const struct tb_switch *sw, u32 status) | |
72 | { | |
73 | struct nvm_auth_status *st; | |
74 | ||
75 | if (WARN_ON(!sw->uuid)) | |
76 | return; | |
77 | ||
78 | mutex_lock(&nvm_auth_status_lock); | |
79 | st = __nvm_get_auth_status(sw); | |
80 | ||
81 | if (!st) { | |
82 | st = kzalloc(sizeof(*st), GFP_KERNEL); | |
83 | if (!st) | |
84 | goto unlock; | |
85 | ||
86 | memcpy(&st->uuid, sw->uuid, sizeof(st->uuid)); | |
87 | INIT_LIST_HEAD(&st->list); | |
88 | list_add_tail(&st->list, &nvm_auth_status_cache); | |
89 | } | |
90 | ||
91 | st->status = status; | |
92 | unlock: | |
93 | mutex_unlock(&nvm_auth_status_lock); | |
94 | } | |
95 | ||
96 | static void nvm_clear_auth_status(const struct tb_switch *sw) | |
97 | { | |
98 | struct nvm_auth_status *st; | |
99 | ||
100 | mutex_lock(&nvm_auth_status_lock); | |
101 | st = __nvm_get_auth_status(sw); | |
102 | if (st) { | |
103 | list_del(&st->list); | |
104 | kfree(st); | |
105 | } | |
106 | mutex_unlock(&nvm_auth_status_lock); | |
107 | } | |
108 | ||
109 | static int nvm_validate_and_write(struct tb_switch *sw) | |
110 | { | |
111 | unsigned int image_size, hdr_size; | |
112 | const u8 *buf = sw->nvm->buf; | |
113 | u16 ds_size; | |
114 | int ret; | |
115 | ||
116 | if (!buf) | |
117 | return -EINVAL; | |
118 | ||
119 | image_size = sw->nvm->buf_data_size; | |
120 | if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE) | |
121 | return -EINVAL; | |
122 | ||
123 | /* | |
124 | * FARB pointer must point inside the image and the image must |
125 | * contain at least the parts of the digital section we read here. |
126 | */ | |
127 | hdr_size = (*(u32 *)buf) & 0xffffff; | |
128 | if (hdr_size + NVM_DEVID + 2 >= image_size) | |
129 | return -EINVAL; | |
130 | ||
131 | /* Digital section start should be aligned to 4k page */ | |
132 | if (!IS_ALIGNED(hdr_size, SZ_4K)) | |
133 | return -EINVAL; | |
134 | ||
135 | /* | |
136 | * Read digital section size and check that it also fits inside | |
137 | * the image. | |
138 | */ | |
139 | ds_size = *(u16 *)(buf + hdr_size); | |
140 | if (ds_size >= image_size) | |
141 | return -EINVAL; | |
142 | ||
143 | if (!sw->safe_mode) { | |
144 | u16 device_id; | |
145 | ||
146 | /* | |
147 | * Make sure the device ID in the image matches the one | |
148 | * we read from the switch config space. | |
149 | */ | |
150 | device_id = *(u16 *)(buf + hdr_size + NVM_DEVID); | |
151 | if (device_id != sw->config.device_id) | |
152 | return -EINVAL; | |
153 | ||
154 | if (sw->generation < 3) { | |
155 | /* Write CSS headers first */ | |
156 | ret = dma_port_flash_write(sw->dma_port, | |
157 | DMA_PORT_CSS_ADDRESS, buf + NVM_CSS, | |
158 | DMA_PORT_CSS_MAX_SIZE); | |
159 | if (ret) | |
160 | return ret; | |
161 | } | |
162 | ||
163 | /* Skip headers in the image */ | |
164 | buf += hdr_size; | |
165 | image_size -= hdr_size; | |
166 | } | |
167 | ||
168 | return dma_port_flash_write(sw->dma_port, 0, buf, image_size); | |
169 | } | |
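The validation above boils down to a handful of offset checks on the raw image: the low 24 bits of the first dword give the header size (the FARB pointer), which must be 4k aligned, and the device ID sits at hdr_size + NVM_DEVID. A minimal standalone sketch of those checks on a synthetic little-endian buffer (all values invented for illustration):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NVM_DEVID	0x05
#define SZ_4K		0x1000u

int main(void)
{
	uint8_t image[0x5000] = { 0 };
	unsigned int image_size = sizeof(image);
	uint32_t hdr_size;
	uint16_t device_id;

	/* Synthetic image: FARB pointer 0x001000, device ID 0x1578 (hypothetical). */
	image[1] = 0x10;			/* little-endian 0x001000 in the first dword */
	image[0x1000 + NVM_DEVID] = 0x78;
	image[0x1000 + NVM_DEVID + 1] = 0x15;

	memcpy(&hdr_size, image, sizeof(hdr_size));
	hdr_size &= 0xffffff;			/* only the low 24 bits form the FARB pointer */

	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return 1;			/* digital section must fit inside the image */
	if (hdr_size % SZ_4K)
		return 1;			/* and must start on a 4k boundary */

	memcpy(&device_id, image + hdr_size + NVM_DEVID, sizeof(device_id));
	printf("header size %#x, device ID in image %#x\n", hdr_size, device_id);
	return 0;
}
```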
170 | ||
171 | static int nvm_authenticate_host(struct tb_switch *sw) | |
172 | { | |
173 | int ret; | |
174 | ||
175 | /* | |
176 | * Root switch NVM upgrade requires that we disconnect the | |
d1ff7024 | 177 | * existing paths first (in case it is not in safe mode |
e6b245cc MW |
178 | * already). |
179 | */ | |
180 | if (!sw->safe_mode) { | |
d1ff7024 | 181 | ret = tb_domain_disconnect_all_paths(sw->tb); |
e6b245cc MW |
182 | if (ret) |
183 | return ret; | |
184 | /* | |
185 | * The host controller goes away pretty soon after this if | |
186 | * everything goes well, so getting a timeout is expected. |
187 | */ | |
188 | ret = dma_port_flash_update_auth(sw->dma_port); | |
189 | return ret == -ETIMEDOUT ? 0 : ret; | |
190 | } | |
191 | ||
192 | /* | |
193 | * From safe mode we can get out by just power cycling the | |
194 | * switch. | |
195 | */ | |
196 | dma_port_power_cycle(sw->dma_port); | |
197 | return 0; | |
198 | } | |
199 | ||
200 | static int nvm_authenticate_device(struct tb_switch *sw) | |
201 | { | |
202 | int ret, retries = 10; | |
203 | ||
204 | ret = dma_port_flash_update_auth(sw->dma_port); | |
205 | if (ret && ret != -ETIMEDOUT) | |
206 | return ret; | |
207 | ||
208 | /* | |
209 | * Poll here for the authentication status. It takes some time | |
210 | * for the device to respond (we get a timeout for a while). Once |
211 | * we get a response the device needs to be power cycled in order |
212 | * for the new NVM to be taken into use. |
213 | */ | |
214 | do { | |
215 | u32 status; | |
216 | ||
217 | ret = dma_port_flash_update_auth_status(sw->dma_port, &status); | |
218 | if (ret < 0 && ret != -ETIMEDOUT) | |
219 | return ret; | |
220 | if (ret > 0) { | |
221 | if (status) { | |
222 | tb_sw_warn(sw, "failed to authenticate NVM\n"); | |
223 | nvm_set_auth_status(sw, status); | |
224 | } | |
225 | ||
226 | tb_sw_info(sw, "power cycling the switch now\n"); | |
227 | dma_port_power_cycle(sw->dma_port); | |
228 | return 0; | |
229 | } | |
230 | ||
231 | msleep(500); | |
232 | } while (--retries); | |
233 | ||
234 | return -ETIMEDOUT; | |
235 | } | |
236 | ||
237 | static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, | |
238 | size_t bytes) | |
239 | { | |
240 | struct tb_switch *sw = priv; | |
2d8ff0b5 MW |
241 | int ret; |
242 | ||
243 | pm_runtime_get_sync(&sw->dev); | |
244 | ret = dma_port_flash_read(sw->dma_port, offset, val, bytes); | |
245 | pm_runtime_mark_last_busy(&sw->dev); | |
246 | pm_runtime_put_autosuspend(&sw->dev); | |
e6b245cc | 247 | |
2d8ff0b5 | 248 | return ret; |
e6b245cc MW |
249 | } |
250 | ||
251 | static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, | |
252 | size_t bytes) | |
253 | { | |
254 | struct tb_switch *sw = priv; | |
255 | int ret = 0; | |
256 | ||
257 | if (mutex_lock_interruptible(&switch_lock)) | |
258 | return -ERESTARTSYS; | |
259 | ||
260 | /* | |
261 | * Since writing the NVM image might require some special steps, | |
262 | * for example when CSS headers are written, we cache the image | |
263 | * locally here and handle the special cases when the user asks | |
264 | * us to authenticate the image. | |
265 | */ | |
266 | if (!sw->nvm->buf) { | |
267 | sw->nvm->buf = vmalloc(NVM_MAX_SIZE); | |
268 | if (!sw->nvm->buf) { | |
269 | ret = -ENOMEM; | |
270 | goto unlock; | |
271 | } | |
272 | } | |
273 | ||
274 | sw->nvm->buf_data_size = offset + bytes; | |
275 | memcpy(sw->nvm->buf + offset, val, bytes); | |
276 | ||
277 | unlock: | |
278 | mutex_unlock(&switch_lock); | |
279 | ||
280 | return ret; | |
281 | } | |
282 | ||
283 | static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id, | |
284 | size_t size, bool active) | |
285 | { | |
286 | struct nvmem_config config; | |
287 | ||
288 | memset(&config, 0, sizeof(config)); | |
289 | ||
290 | if (active) { | |
291 | config.name = "nvm_active"; | |
292 | config.reg_read = tb_switch_nvm_read; | |
800161bd | 293 | config.read_only = true; |
e6b245cc MW |
294 | } else { |
295 | config.name = "nvm_non_active"; | |
296 | config.reg_write = tb_switch_nvm_write; | |
800161bd | 297 | config.root_only = true; |
e6b245cc MW |
298 | } |
299 | ||
300 | config.id = id; | |
301 | config.stride = 4; | |
302 | config.word_size = 4; | |
303 | config.size = size; | |
304 | config.dev = &sw->dev; | |
305 | config.owner = THIS_MODULE; | |
e6b245cc MW |
306 | config.priv = sw; |
307 | ||
308 | return nvmem_register(&config); | |
309 | } | |
310 | ||
311 | static int tb_switch_nvm_add(struct tb_switch *sw) | |
312 | { | |
313 | struct nvmem_device *nvm_dev; | |
314 | struct tb_switch_nvm *nvm; | |
315 | u32 val; | |
316 | int ret; | |
317 | ||
318 | if (!sw->dma_port) | |
319 | return 0; | |
320 | ||
321 | nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); | |
322 | if (!nvm) | |
323 | return -ENOMEM; | |
324 | ||
325 | nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL); | |
326 | ||
327 | /* | |
328 | * If the switch is in safe-mode the only accessible portion of | |
329 | * the NVM is the non-active one where userspace is expected to | |
330 | * write new functional NVM. | |
331 | */ | |
332 | if (!sw->safe_mode) { | |
333 | u32 nvm_size, hdr_size; | |
334 | ||
335 | ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val, | |
336 | sizeof(val)); | |
337 | if (ret) | |
338 | goto err_ida; | |
339 | ||
340 | hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K; | |
341 | nvm_size = (SZ_1M << (val & 7)) / 8; | |
342 | nvm_size = (nvm_size - hdr_size) / 2; | |
343 | ||
344 | ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val, | |
345 | sizeof(val)); | |
346 | if (ret) | |
347 | goto err_ida; | |
348 | ||
349 | nvm->major = val >> 16; | |
350 | nvm->minor = val >> 8; | |
351 | ||
352 | nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true); | |
353 | if (IS_ERR(nvm_dev)) { | |
354 | ret = PTR_ERR(nvm_dev); | |
355 | goto err_ida; | |
356 | } | |
357 | nvm->active = nvm_dev; | |
358 | } | |
359 | ||
360 | nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false); | |
361 | if (IS_ERR(nvm_dev)) { | |
362 | ret = PTR_ERR(nvm_dev); | |
363 | goto err_nvm_active; | |
364 | } | |
365 | nvm->non_active = nvm_dev; | |
366 | ||
367 | mutex_lock(&switch_lock); | |
368 | sw->nvm = nvm; | |
369 | mutex_unlock(&switch_lock); | |
370 | ||
371 | return 0; | |
372 | ||
373 | err_nvm_active: | |
374 | if (nvm->active) | |
375 | nvmem_unregister(nvm->active); | |
376 | err_ida: | |
377 | ida_simple_remove(&nvm_ida, nvm->id); | |
378 | kfree(nvm); | |
379 | ||
380 | return ret; | |
381 | } | |
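The size math above is easier to see with concrete numbers: the flash size register encodes the total flash in its low three bits, the headers take 8k or 16k depending on generation, and the remainder is split between the active and non-active images. A standalone sketch of the same arithmetic with an invented register value and generation:

```c
#include <stdio.h>

#define SZ_8K	0x2000u
#define SZ_16K	0x4000u
#define SZ_1M	0x100000u

int main(void)
{
	unsigned int val = 0x6;			/* hypothetical NVM_FLASH_SIZE register value */
	unsigned int generation = 3;		/* hypothetical switch generation */
	unsigned int version = 0x00210500;	/* hypothetical NVM_VERSION register value */
	unsigned int hdr_size, flash_size, nvm_size;

	hdr_size = generation < 3 ? SZ_8K : SZ_16K;
	flash_size = (SZ_1M << (val & 7)) / 8;	/* encoded size -> bytes */
	nvm_size = (flash_size - hdr_size) / 2;	/* headers removed, split in two */

	printf("flash %u bytes, per-image NVM size %u bytes\n", flash_size, nvm_size);
	printf("NVM version %x.%x\n", (version >> 16) & 0xff, (version >> 8) & 0xff);
	return 0;
}
```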
382 | ||
383 | static void tb_switch_nvm_remove(struct tb_switch *sw) | |
384 | { | |
385 | struct tb_switch_nvm *nvm; | |
386 | ||
387 | mutex_lock(&switch_lock); | |
388 | nvm = sw->nvm; | |
389 | sw->nvm = NULL; | |
390 | mutex_unlock(&switch_lock); | |
391 | ||
392 | if (!nvm) | |
393 | return; | |
394 | ||
395 | /* Remove authentication status in case the switch is unplugged */ | |
396 | if (!nvm->authenticating) | |
397 | nvm_clear_auth_status(sw); | |
398 | ||
399 | nvmem_unregister(nvm->non_active); | |
400 | if (nvm->active) | |
401 | nvmem_unregister(nvm->active); | |
402 | ida_simple_remove(&nvm_ida, nvm->id); | |
403 | vfree(nvm->buf); | |
404 | kfree(nvm); | |
405 | } | |
406 | ||
a25c8b2f AN |
407 | /* port utility functions */ |
408 | ||
409 | static const char *tb_port_type(struct tb_regs_port_header *port) | |
410 | { | |
411 | switch (port->type >> 16) { | |
412 | case 0: | |
413 | switch ((u8) port->type) { | |
414 | case 0: | |
415 | return "Inactive"; | |
416 | case 1: | |
417 | return "Port"; | |
418 | case 2: | |
419 | return "NHI"; | |
420 | default: | |
421 | return "unknown"; | |
422 | } | |
423 | case 0x2: | |
424 | return "Ethernet"; | |
425 | case 0x8: | |
426 | return "SATA"; | |
427 | case 0xe: | |
428 | return "DP/HDMI"; | |
429 | case 0x10: | |
430 | return "PCIe"; | |
431 | case 0x20: | |
432 | return "USB"; | |
433 | default: | |
434 | return "unknown"; | |
435 | } | |
436 | } | |
437 | ||
438 | static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port) | |
439 | { | |
daa5140f MW |
440 | tb_dbg(tb, |
441 | " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n", | |
442 | port->port_number, port->vendor_id, port->device_id, | |
443 | port->revision, port->thunderbolt_version, tb_port_type(port), | |
444 | port->type); | |
445 | tb_dbg(tb, " Max hop id (in/out): %d/%d\n", | |
446 | port->max_in_hop_id, port->max_out_hop_id); | |
447 | tb_dbg(tb, " Max counters: %d\n", port->max_counters); | |
448 | tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits); | |
a25c8b2f AN |
449 | } |
450 | ||
9da672a4 AN |
451 | /** |
452 | * tb_port_state() - get connectedness state of a port | |
453 | * | |
454 | * The port must have a TB_CAP_PHY (i.e. it should be a real port). | |
455 | * | |
456 | * Return: Returns an enum tb_port_state on success or an error code on failure. | |
457 | */ | |
458 | static int tb_port_state(struct tb_port *port) | |
459 | { | |
460 | struct tb_cap_phy phy; | |
461 | int res; | |
462 | if (port->cap_phy == 0) { | |
463 | tb_port_WARN(port, "does not have a PHY\n"); | |
464 | return -EINVAL; | |
465 | } | |
466 | res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2); | |
467 | if (res) | |
468 | return res; | |
469 | return phy.state; | |
470 | } | |
471 | ||
472 | /** | |
473 | * tb_wait_for_port() - wait for a port to become ready | |
474 | * | |
475 | * Wait up to 1 second for a port to reach state TB_PORT_UP. If | |
476 | * wait_if_unplugged is set then we also wait if the port is in state | |
477 | * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after | |
478 | * switch resume). Otherwise we only wait if a device is registered but the link | |
479 | * has not yet been established. | |
480 | * | |
481 | * Return: Returns an error code on failure. Returns 0 if the port is not | |
482 | * connected or failed to reach state TB_PORT_UP within one second. Returns 1 | |
483 | * if the port is connected and in state TB_PORT_UP. | |
484 | */ | |
485 | int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged) | |
486 | { | |
487 | int retries = 10; | |
488 | int state; | |
489 | if (!port->cap_phy) { | |
490 | tb_port_WARN(port, "does not have PHY\n"); | |
491 | return -EINVAL; | |
492 | } | |
493 | if (tb_is_upstream_port(port)) { | |
494 | tb_port_WARN(port, "is the upstream port\n"); | |
495 | return -EINVAL; | |
496 | } | |
497 | ||
498 | while (retries--) { | |
499 | state = tb_port_state(port); | |
500 | if (state < 0) | |
501 | return state; | |
502 | if (state == TB_PORT_DISABLED) { | |
503 | tb_port_info(port, "is disabled (state: 0)\n"); | |
504 | return 0; | |
505 | } | |
506 | if (state == TB_PORT_UNPLUGGED) { | |
507 | if (wait_if_unplugged) { | |
508 | /* used during resume */ | |
509 | tb_port_info(port, | |
510 | "is unplugged (state: 7), retrying...\n"); | |
511 | msleep(100); | |
512 | continue; | |
513 | } | |
514 | tb_port_info(port, "is unplugged (state: 7)\n"); | |
515 | return 0; | |
516 | } | |
517 | if (state == TB_PORT_UP) { | |
518 | tb_port_info(port, | |
519 | "is connected, link is up (state: 2)\n"); | |
520 | return 1; | |
521 | } | |
522 | ||
523 | /* | |
524 | * After plug-in the state is TB_PORT_CONNECTING. Give it some | |
525 | * time. | |
526 | */ | |
527 | tb_port_info(port, | |
528 | "is connected, link is not up (state: %d), retrying...\n", | |
529 | state); | |
530 | msleep(100); | |
531 | } | |
532 | tb_port_warn(port, | |
533 | "failed to reach state TB_PORT_UP. Ignoring port...\n"); | |
534 | return 0; | |
535 | } | |
536 | ||
520b6702 AN |
537 | /** |
538 | * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port | |
539 | * | |
540 | * Change the number of NFC credits allocated to @port by @credits. To remove | |
541 | * NFC credits pass a negative amount of credits. | |
542 | * | |
543 | * Return: Returns 0 on success or an error code on failure. | |
544 | */ | |
545 | int tb_port_add_nfc_credits(struct tb_port *port, int credits) | |
546 | { | |
547 | if (credits == 0) | |
548 | return 0; | |
549 | tb_port_info(port, | |
550 | "adding %#x NFC credits (%#x -> %#x)", | |
551 | credits, | |
552 | port->config.nfc_credits, | |
553 | port->config.nfc_credits + credits); | |
554 | port->config.nfc_credits += credits; | |
555 | return tb_port_write(port, &port->config.nfc_credits, | |
556 | TB_CFG_PORT, 4, 1); | |
557 | } | |
558 | ||
559 | /** | |
560 | * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER | |
561 | * | |
562 | * Return: Returns 0 on success or an error code on failure. | |
563 | */ | |
564 | int tb_port_clear_counter(struct tb_port *port, int counter) | |
565 | { | |
566 | u32 zero[3] = { 0, 0, 0 }; | |
567 | tb_port_info(port, "clearing counter %d\n", counter); | |
568 | return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3); | |
569 | } | |
570 | ||
a25c8b2f AN |
571 | /** |
572 | * tb_init_port() - initialize a port | |
573 | * | |
574 | * This is a helper method for tb_switch_alloc. Does not check or initialize | |
575 | * any downstream switches. | |
576 | * | |
577 | * Return: Returns 0 on success or an error code on failure. | |
578 | */ | |
343fcb8c | 579 | static int tb_init_port(struct tb_port *port) |
a25c8b2f AN |
580 | { |
581 | int res; | |
9da672a4 | 582 | int cap; |
343fcb8c | 583 | |
a25c8b2f AN |
584 | res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8); |
585 | if (res) | |
586 | return res; | |
587 | ||
9da672a4 | 588 | /* Port 0 is the switch itself and has no PHY. */ |
343fcb8c | 589 | if (port->config.type == TB_TYPE_PORT && port->port != 0) { |
da2da04b | 590 | cap = tb_port_find_cap(port, TB_PORT_CAP_PHY); |
9da672a4 AN |
591 | |
592 | if (cap > 0) | |
593 | port->cap_phy = cap; | |
594 | else | |
595 | tb_port_WARN(port, "non switch port without a PHY\n"); | |
596 | } | |
597 | ||
343fcb8c | 598 | tb_dump_port(port->sw->tb, &port->config); |
a25c8b2f AN |
599 | |
600 | /* TODO: Read dual link port, DP port and more from EEPROM. */ | |
601 | return 0; | |
602 | ||
603 | } | |
604 | ||
605 | /* switch utility functions */ | |
606 | ||
607 | static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw) | |
608 | { | |
daa5140f MW |
609 | tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n", |
610 | sw->vendor_id, sw->device_id, sw->revision, | |
611 | sw->thunderbolt_version); | |
612 | tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number); | |
613 | tb_dbg(tb, " Config:\n"); | |
614 | tb_dbg(tb, | |
a25c8b2f | 615 | " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n", |
daa5140f MW |
616 | sw->upstream_port_number, sw->depth, |
617 | (((u64) sw->route_hi) << 32) | sw->route_lo, | |
618 | sw->enabled, sw->plug_events_delay); | |
619 | tb_dbg(tb, " unknown1: %#x unknown4: %#x\n", | |
620 | sw->__unknown1, sw->__unknown4); | |
a25c8b2f AN |
621 | } |
622 | ||
23dd5bb4 AN |
623 | /** |
624 | * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET |
625 | * | |
626 | * Return: Returns 0 on success or an error code on failure. | |
627 | */ | |
628 | int tb_switch_reset(struct tb *tb, u64 route) | |
629 | { | |
630 | struct tb_cfg_result res; | |
631 | struct tb_regs_switch_header header = { | |
632 | header.route_hi = route >> 32, | |
633 | header.route_lo = route, | |
634 | header.enabled = true, | |
635 | }; | |
daa5140f | 636 | tb_dbg(tb, "resetting switch at %llx\n", route); |
23dd5bb4 AN |
637 | res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route, |
638 | 0, 2, 2, 2); | |
639 | if (res.err) | |
640 | return res.err; | |
641 | res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT); | |
642 | if (res.err > 0) | |
643 | return -EIO; | |
644 | return res.err; | |
645 | } | |
646 | ||
053596d9 AN |
647 | struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route) |
648 | { | |
649 | u8 next_port = route; /* | |
650 | * Routes use a stride of 8 bits, | |
651 | * even though a port index has 6 bits at most. |
652 | */ |
653 | if (route == 0) | |
654 | return sw; | |
655 | if (next_port > sw->config.max_port_number) | |
c9c2deef | 656 | return NULL; |
053596d9 | 657 | if (tb_is_upstream_port(&sw->ports[next_port])) |
c9c2deef | 658 | return NULL; |
053596d9 | 659 | if (!sw->ports[next_port].remote) |
c9c2deef | 660 | return NULL; |
053596d9 AN |
661 | return get_switch_at_route(sw->ports[next_port].remote->sw, |
662 | route >> TB_ROUTE_SHIFT); | |
663 | } | |
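In other words a route string is just a list of downstream port numbers, one per byte, consumed low byte first as the lookup recurses. A standalone walk of an invented route (assuming TB_ROUTE_SHIFT is 8, matching the stride mentioned in the comment):

```c
#include <stdint.h>
#include <stdio.h>

#define TB_ROUTE_SHIFT	8	/* assumed 8-bit stride, as the comment above states */

int main(void)
{
	uint64_t route = 0x0301;	/* hypothetical route: port 1 from the root, then port 3 */
	int depth = 0;

	while (route) {
		printf("hop %d: downstream port %u\n", ++depth, (unsigned int)(uint8_t)route);
		route >>= TB_ROUTE_SHIFT;
	}
	printf("route length: %d hops\n", depth);
	return 0;
}
```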
664 | ||
ca389f71 AN |
665 | /** |
666 | * tb_plug_events_active() - enable/disable plug events on a switch | |
667 | * | |
668 | * Also configures a sane plug_events_delay of 255ms. | |
669 | * | |
670 | * Return: Returns 0 on success or an error code on failure. | |
671 | */ | |
672 | static int tb_plug_events_active(struct tb_switch *sw, bool active) | |
673 | { | |
674 | u32 data; | |
675 | int res; | |
676 | ||
bfe778ac MW |
677 | if (!sw->config.enabled) |
678 | return 0; | |
679 | ||
ca389f71 AN |
680 | sw->config.plug_events_delay = 0xff; |
681 | res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1); | |
682 | if (res) | |
683 | return res; | |
684 | ||
685 | res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); | |
686 | if (res) | |
687 | return res; | |
688 | ||
689 | if (active) { | |
690 | data = data & 0xFFFFFF83; | |
691 | switch (sw->config.device_id) { | |
1d111406 LW |
692 | case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: |
693 | case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: | |
694 | case PCI_DEVICE_ID_INTEL_PORT_RIDGE: | |
ca389f71 AN |
695 | break; |
696 | default: | |
697 | data |= 4; | |
698 | } | |
699 | } else { | |
700 | data = data | 0x7c; | |
701 | } | |
702 | return tb_sw_write(sw, &data, TB_CFG_SWITCH, | |
703 | sw->cap_plug_events + 1, 1); | |
704 | } | |
705 | ||
f67cf491 MW |
706 | static ssize_t authorized_show(struct device *dev, |
707 | struct device_attribute *attr, | |
708 | char *buf) | |
709 | { | |
710 | struct tb_switch *sw = tb_to_switch(dev); | |
711 | ||
712 | return sprintf(buf, "%u\n", sw->authorized); | |
713 | } | |
714 | ||
715 | static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) | |
716 | { | |
717 | int ret = -EINVAL; | |
718 | ||
719 | if (mutex_lock_interruptible(&switch_lock)) | |
720 | return -ERESTARTSYS; | |
721 | ||
722 | if (sw->authorized) | |
723 | goto unlock; | |
724 | ||
a03e8289 MW |
725 | /* |
726 | * Make sure there is no PCIe rescan ongoing when a new PCIe | |
727 | * tunnel is created. Otherwise the PCIe rescan code might find | |
728 | * the new tunnel too early. | |
729 | */ | |
730 | pci_lock_rescan_remove(); | |
2d8ff0b5 | 731 | pm_runtime_get_sync(&sw->dev); |
a03e8289 | 732 | |
f67cf491 MW |
733 | switch (val) { |
734 | /* Approve switch */ | |
735 | case 1: | |
736 | if (sw->key) | |
737 | ret = tb_domain_approve_switch_key(sw->tb, sw); | |
738 | else | |
739 | ret = tb_domain_approve_switch(sw->tb, sw); | |
740 | break; | |
741 | ||
742 | /* Challenge switch */ | |
743 | case 2: | |
744 | if (sw->key) | |
745 | ret = tb_domain_challenge_switch_key(sw->tb, sw); | |
746 | break; | |
747 | ||
748 | default: | |
749 | break; | |
750 | } | |
751 | ||
2d8ff0b5 MW |
752 | pm_runtime_mark_last_busy(&sw->dev); |
753 | pm_runtime_put_autosuspend(&sw->dev); | |
a03e8289 MW |
754 | pci_unlock_rescan_remove(); |
755 | ||
f67cf491 MW |
756 | if (!ret) { |
757 | sw->authorized = val; | |
758 | /* Notify userspace about the status change */ |
759 | kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); | |
760 | } | |
761 | ||
762 | unlock: | |
763 | mutex_unlock(&switch_lock); | |
764 | return ret; | |
765 | } | |
766 | ||
767 | static ssize_t authorized_store(struct device *dev, | |
768 | struct device_attribute *attr, | |
769 | const char *buf, size_t count) | |
770 | { | |
771 | struct tb_switch *sw = tb_to_switch(dev); | |
772 | unsigned int val; | |
773 | ssize_t ret; | |
774 | ||
775 | ret = kstrtouint(buf, 0, &val); | |
776 | if (ret) | |
777 | return ret; | |
778 | if (val > 2) | |
779 | return -EINVAL; | |
780 | ||
781 | ret = tb_switch_set_authorized(sw, val); | |
782 | ||
783 | return ret ? ret : count; | |
784 | } | |
785 | static DEVICE_ATTR_RW(authorized); | |
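The attribute above is the knob userspace security tooling (bolt, for example) pokes to approve or challenge a device. A minimal userspace sketch of that interaction; the sysfs path is only an example of the usual /sys/bus/thunderbolt layout:

```c
#include <stdio.h>

int main(void)
{
	/* Example path; real device names depend on the domain and position. */
	const char *attr = "/sys/bus/thunderbolt/devices/0-1/authorized";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* 1 = approve, 2 = challenge using the stored key */
	return fclose(f) ? 1 : 0;
}
```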
786 | ||
14862ee3 YB |
787 | static ssize_t boot_show(struct device *dev, struct device_attribute *attr, |
788 | char *buf) | |
789 | { | |
790 | struct tb_switch *sw = tb_to_switch(dev); | |
791 | ||
792 | return sprintf(buf, "%u\n", sw->boot); | |
793 | } | |
794 | static DEVICE_ATTR_RO(boot); | |
795 | ||
bfe778ac MW |
796 | static ssize_t device_show(struct device *dev, struct device_attribute *attr, |
797 | char *buf) | |
798 | { | |
799 | struct tb_switch *sw = tb_to_switch(dev); | |
ca389f71 | 800 | |
bfe778ac MW |
801 | return sprintf(buf, "%#x\n", sw->device); |
802 | } | |
803 | static DEVICE_ATTR_RO(device); | |
804 | ||
72ee3390 MW |
805 | static ssize_t |
806 | device_name_show(struct device *dev, struct device_attribute *attr, char *buf) | |
807 | { | |
808 | struct tb_switch *sw = tb_to_switch(dev); | |
809 | ||
810 | return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : ""); | |
811 | } | |
812 | static DEVICE_ATTR_RO(device_name); | |
813 | ||
f67cf491 MW |
814 | static ssize_t key_show(struct device *dev, struct device_attribute *attr, |
815 | char *buf) | |
816 | { | |
817 | struct tb_switch *sw = tb_to_switch(dev); | |
818 | ssize_t ret; | |
819 | ||
820 | if (mutex_lock_interruptible(&switch_lock)) | |
821 | return -ERESTARTSYS; | |
822 | ||
823 | if (sw->key) | |
824 | ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); | |
825 | else | |
826 | ret = sprintf(buf, "\n"); | |
827 | ||
828 | mutex_unlock(&switch_lock); | |
829 | return ret; | |
830 | } | |
831 | ||
832 | static ssize_t key_store(struct device *dev, struct device_attribute *attr, | |
833 | const char *buf, size_t count) | |
834 | { | |
835 | struct tb_switch *sw = tb_to_switch(dev); | |
836 | u8 key[TB_SWITCH_KEY_SIZE]; | |
837 | ssize_t ret = count; | |
e545f0d8 | 838 | bool clear = false; |
f67cf491 | 839 | |
e545f0d8 BY |
840 | if (!strcmp(buf, "\n")) |
841 | clear = true; | |
842 | else if (hex2bin(key, buf, sizeof(key))) | |
f67cf491 MW |
843 | return -EINVAL; |
844 | ||
845 | if (mutex_lock_interruptible(&switch_lock)) | |
846 | return -ERESTARTSYS; | |
847 | ||
848 | if (sw->authorized) { | |
849 | ret = -EBUSY; | |
850 | } else { | |
851 | kfree(sw->key); | |
e545f0d8 BY |
852 | if (clear) { |
853 | sw->key = NULL; | |
854 | } else { | |
855 | sw->key = kmemdup(key, sizeof(key), GFP_KERNEL); | |
856 | if (!sw->key) | |
857 | ret = -ENOMEM; | |
858 | } | |
f67cf491 MW |
859 | } |
860 | ||
861 | mutex_unlock(&switch_lock); | |
862 | return ret; | |
863 | } | |
0956e411 | 864 | static DEVICE_ATTR(key, 0600, key_show, key_store); |
f67cf491 | 865 | |
1830b6ee MW |
866 | static void nvm_authenticate_start(struct tb_switch *sw) |
867 | { | |
868 | struct pci_dev *root_port; | |
869 | ||
870 | /* | |
871 | * During host router NVM upgrade we should not allow root port to | |
872 | * go into D3cold because some root ports cannot trigger PME | |
873 | * themselves. To be on the safe side keep the root port in D0 during |
874 | * the whole upgrade process. | |
875 | */ | |
876 | root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); | |
877 | if (root_port) | |
878 | pm_runtime_get_noresume(&root_port->dev); | |
879 | } | |
880 | ||
881 | static void nvm_authenticate_complete(struct tb_switch *sw) | |
882 | { | |
883 | struct pci_dev *root_port; | |
884 | ||
885 | root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); | |
886 | if (root_port) | |
887 | pm_runtime_put(&root_port->dev); | |
888 | } | |
889 | ||
e6b245cc MW |
890 | static ssize_t nvm_authenticate_show(struct device *dev, |
891 | struct device_attribute *attr, char *buf) | |
892 | { | |
893 | struct tb_switch *sw = tb_to_switch(dev); | |
894 | u32 status; | |
895 | ||
896 | nvm_get_auth_status(sw, &status); | |
897 | return sprintf(buf, "%#x\n", status); | |
898 | } | |
899 | ||
900 | static ssize_t nvm_authenticate_store(struct device *dev, | |
901 | struct device_attribute *attr, const char *buf, size_t count) | |
902 | { | |
903 | struct tb_switch *sw = tb_to_switch(dev); | |
904 | bool val; | |
905 | int ret; | |
906 | ||
907 | if (mutex_lock_interruptible(&switch_lock)) | |
908 | return -ERESTARTSYS; | |
909 | ||
910 | /* If NVMem devices are not yet added */ | |
911 | if (!sw->nvm) { | |
912 | ret = -EAGAIN; | |
913 | goto exit_unlock; | |
914 | } | |
915 | ||
916 | ret = kstrtobool(buf, &val); | |
917 | if (ret) | |
918 | goto exit_unlock; | |
919 | ||
920 | /* Always clear the authentication status */ | |
921 | nvm_clear_auth_status(sw); | |
922 | ||
923 | if (val) { | |
2d8ff0b5 MW |
924 | if (!sw->nvm->buf) { |
925 | ret = -EINVAL; | |
926 | goto exit_unlock; | |
927 | } | |
928 | ||
929 | pm_runtime_get_sync(&sw->dev); | |
e6b245cc | 930 | ret = nvm_validate_and_write(sw); |
2d8ff0b5 MW |
931 | if (ret) { |
932 | pm_runtime_mark_last_busy(&sw->dev); | |
933 | pm_runtime_put_autosuspend(&sw->dev); | |
e6b245cc | 934 | goto exit_unlock; |
2d8ff0b5 | 935 | } |
e6b245cc MW |
936 | |
937 | sw->nvm->authenticating = true; | |
938 | ||
1830b6ee MW |
939 | if (!tb_route(sw)) { |
940 | /* | |
941 | * Keep root port from suspending as long as the | |
942 | * NVM upgrade process is running. | |
943 | */ | |
944 | nvm_authenticate_start(sw); | |
e6b245cc | 945 | ret = nvm_authenticate_host(sw); |
1830b6ee MW |
946 | if (ret) |
947 | nvm_authenticate_complete(sw); | |
948 | } else { | |
e6b245cc | 949 | ret = nvm_authenticate_device(sw); |
1830b6ee | 950 | } |
2d8ff0b5 MW |
951 | pm_runtime_mark_last_busy(&sw->dev); |
952 | pm_runtime_put_autosuspend(&sw->dev); | |
e6b245cc MW |
953 | } |
954 | ||
955 | exit_unlock: | |
956 | mutex_unlock(&switch_lock); | |
957 | ||
958 | if (ret) | |
959 | return ret; | |
960 | return count; | |
961 | } | |
962 | static DEVICE_ATTR_RW(nvm_authenticate); | |
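Together with the NVMem devices registered earlier, this attribute gives userspace the whole upgrade flow: copy the new image into the non-active NVM and then write 1 to nvm_authenticate. A rough userspace sketch, with the device path and image name as examples only:

```c
#include <stdio.h>

static int copy_image(const char *src, const char *dst)
{
	FILE *in = fopen(src, "rb");
	FILE *out = fopen(dst, "wb");
	char buf[4096];
	size_t n;
	int ret = (in && out) ? 0 : -1;

	while (!ret && (n = fread(buf, 1, sizeof(buf), in)) > 0)
		if (fwrite(buf, 1, n, out) != n)
			ret = -1;
	if (in)
		fclose(in);
	if (out && fclose(out))
		ret = -1;
	return ret;
}

int main(void)
{
	/* 1) Stage the image in the non-active NVM (path is an example). */
	if (copy_image("firmware.bin",
		       "/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem"))
		return 1;

	/* 2) Trigger authentication; the device drops off and comes back
	 *    running the new NVM if everything went well. */
	FILE *f = fopen("/sys/bus/thunderbolt/devices/0-1/nvm_authenticate", "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}
```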
963 | ||
964 | static ssize_t nvm_version_show(struct device *dev, | |
965 | struct device_attribute *attr, char *buf) | |
966 | { | |
967 | struct tb_switch *sw = tb_to_switch(dev); | |
968 | int ret; | |
969 | ||
970 | if (mutex_lock_interruptible(&switch_lock)) | |
971 | return -ERESTARTSYS; | |
972 | ||
973 | if (sw->safe_mode) | |
974 | ret = -ENODATA; | |
975 | else if (!sw->nvm) | |
976 | ret = -EAGAIN; | |
977 | else | |
978 | ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); | |
979 | ||
980 | mutex_unlock(&switch_lock); | |
981 | ||
982 | return ret; | |
983 | } | |
984 | static DEVICE_ATTR_RO(nvm_version); | |
985 | ||
bfe778ac MW |
986 | static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, |
987 | char *buf) | |
a25c8b2f | 988 | { |
bfe778ac | 989 | struct tb_switch *sw = tb_to_switch(dev); |
a25c8b2f | 990 | |
bfe778ac MW |
991 | return sprintf(buf, "%#x\n", sw->vendor); |
992 | } | |
993 | static DEVICE_ATTR_RO(vendor); | |
994 | ||
72ee3390 MW |
995 | static ssize_t |
996 | vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) | |
997 | { | |
998 | struct tb_switch *sw = tb_to_switch(dev); | |
999 | ||
1000 | return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : ""); | |
1001 | } | |
1002 | static DEVICE_ATTR_RO(vendor_name); | |
1003 | ||
bfe778ac MW |
1004 | static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, |
1005 | char *buf) | |
1006 | { | |
1007 | struct tb_switch *sw = tb_to_switch(dev); | |
1008 | ||
1009 | return sprintf(buf, "%pUb\n", sw->uuid); | |
1010 | } | |
1011 | static DEVICE_ATTR_RO(unique_id); | |
1012 | ||
1013 | static struct attribute *switch_attrs[] = { | |
f67cf491 | 1014 | &dev_attr_authorized.attr, |
14862ee3 | 1015 | &dev_attr_boot.attr, |
bfe778ac | 1016 | &dev_attr_device.attr, |
72ee3390 | 1017 | &dev_attr_device_name.attr, |
f67cf491 | 1018 | &dev_attr_key.attr, |
e6b245cc MW |
1019 | &dev_attr_nvm_authenticate.attr, |
1020 | &dev_attr_nvm_version.attr, | |
bfe778ac | 1021 | &dev_attr_vendor.attr, |
72ee3390 | 1022 | &dev_attr_vendor_name.attr, |
bfe778ac MW |
1023 | &dev_attr_unique_id.attr, |
1024 | NULL, | |
1025 | }; | |
1026 | ||
f67cf491 MW |
1027 | static umode_t switch_attr_is_visible(struct kobject *kobj, |
1028 | struct attribute *attr, int n) | |
1029 | { | |
1030 | struct device *dev = container_of(kobj, struct device, kobj); | |
1031 | struct tb_switch *sw = tb_to_switch(dev); | |
1032 | ||
1033 | if (attr == &dev_attr_key.attr) { | |
1034 | if (tb_route(sw) && | |
1035 | sw->tb->security_level == TB_SECURITY_SECURE && | |
1036 | sw->security_level == TB_SECURITY_SECURE) | |
1037 | return attr->mode; | |
1038 | return 0; | |
e6b245cc MW |
1039 | } else if (attr == &dev_attr_nvm_authenticate.attr || |
1040 | attr == &dev_attr_nvm_version.attr) { | |
1041 | if (sw->dma_port) | |
1042 | return attr->mode; | |
1043 | return 0; | |
14862ee3 YB |
1044 | } else if (attr == &dev_attr_boot.attr) { |
1045 | if (tb_route(sw)) | |
1046 | return attr->mode; | |
1047 | return 0; | |
f67cf491 MW |
1048 | } |
1049 | ||
e6b245cc | 1050 | return sw->safe_mode ? 0 : attr->mode; |
f67cf491 MW |
1051 | } |
1052 | ||
bfe778ac | 1053 | static struct attribute_group switch_group = { |
f67cf491 | 1054 | .is_visible = switch_attr_is_visible, |
bfe778ac MW |
1055 | .attrs = switch_attrs, |
1056 | }; | |
ca389f71 | 1057 | |
bfe778ac MW |
1058 | static const struct attribute_group *switch_groups[] = { |
1059 | &switch_group, | |
1060 | NULL, | |
1061 | }; | |
1062 | ||
1063 | static void tb_switch_release(struct device *dev) | |
1064 | { | |
1065 | struct tb_switch *sw = tb_to_switch(dev); | |
1066 | ||
3e136768 MW |
1067 | dma_port_free(sw->dma_port); |
1068 | ||
bfe778ac | 1069 | kfree(sw->uuid); |
72ee3390 MW |
1070 | kfree(sw->device_name); |
1071 | kfree(sw->vendor_name); | |
a25c8b2f | 1072 | kfree(sw->ports); |
343fcb8c | 1073 | kfree(sw->drom); |
f67cf491 | 1074 | kfree(sw->key); |
a25c8b2f AN |
1075 | kfree(sw); |
1076 | } | |
1077 | ||
2d8ff0b5 MW |
1078 | /* |
1079 | * Currently we only need to provide the callbacks. Everything else is handled |
1080 | * in the connection manager. | |
1081 | */ | |
1082 | static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) | |
1083 | { | |
1084 | return 0; | |
1085 | } | |
1086 | ||
1087 | static int __maybe_unused tb_switch_runtime_resume(struct device *dev) | |
1088 | { | |
1089 | return 0; | |
1090 | } | |
1091 | ||
1092 | static const struct dev_pm_ops tb_switch_pm_ops = { | |
1093 | SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, | |
1094 | NULL) | |
1095 | }; | |
1096 | ||
bfe778ac MW |
1097 | struct device_type tb_switch_type = { |
1098 | .name = "thunderbolt_device", | |
1099 | .release = tb_switch_release, | |
2d8ff0b5 | 1100 | .pm = &tb_switch_pm_ops, |
bfe778ac MW |
1101 | }; |
1102 | ||
2c3c4197 MW |
1103 | static int tb_switch_get_generation(struct tb_switch *sw) |
1104 | { | |
1105 | switch (sw->config.device_id) { | |
1106 | case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: | |
1107 | case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: | |
1108 | case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: | |
1109 | case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: | |
1110 | case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: | |
1111 | case PCI_DEVICE_ID_INTEL_PORT_RIDGE: | |
1112 | case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: | |
1113 | case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: | |
1114 | return 1; | |
1115 | ||
1116 | case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: | |
1117 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: | |
1118 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: | |
1119 | return 2; | |
1120 | ||
1121 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: | |
1122 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: | |
1123 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: | |
1124 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: | |
1125 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: | |
4bac471d RM |
1126 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: |
1127 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: | |
1128 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: | |
2c3c4197 MW |
1129 | return 3; |
1130 | ||
1131 | default: | |
1132 | /* | |
1133 | * For unknown switches assume generation to be 1 to be | |
1134 | * on the safe side. | |
1135 | */ | |
1136 | tb_sw_warn(sw, "unsupported switch device id %#x\n", | |
1137 | sw->config.device_id); | |
1138 | return 1; | |
1139 | } | |
1140 | } | |
1141 | ||
a25c8b2f | 1142 | /** |
bfe778ac MW |
1143 | * tb_switch_alloc() - allocate a switch |
1144 | * @tb: Pointer to the owning domain | |
1145 | * @parent: Parent device for this switch | |
1146 | * @route: Route string for this switch | |
a25c8b2f | 1147 | * |
bfe778ac MW |
1148 | * Allocates and initializes a switch. Will not upload configuration to |
1149 | * the switch. For that you need to call tb_switch_configure() | |
1150 | * separately. The returned switch should be released by calling | |
1151 | * tb_switch_put(). | |
1152 | * | |
1153 | * Return: Pointer to the allocated switch or %NULL in case of failure | |
a25c8b2f | 1154 | */ |
bfe778ac MW |
1155 | struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, |
1156 | u64 route) | |
a25c8b2f AN |
1157 | { |
1158 | int i; | |
ca389f71 | 1159 | int cap; |
a25c8b2f AN |
1160 | struct tb_switch *sw; |
1161 | int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); | |
1162 | if (upstream_port < 0) | |
1163 | return NULL; | |
1164 | ||
1165 | sw = kzalloc(sizeof(*sw), GFP_KERNEL); | |
1166 | if (!sw) | |
1167 | return NULL; | |
1168 | ||
1169 | sw->tb = tb; | |
aae20bb6 | 1170 | if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5)) |
bfe778ac MW |
1171 | goto err_free_sw_ports; |
1172 | ||
daa5140f | 1173 | tb_dbg(tb, "current switch config:\n"); |
a25c8b2f AN |
1174 | tb_dump_switch(tb, &sw->config); |
1175 | ||
1176 | /* configure switch */ | |
1177 | sw->config.upstream_port_number = upstream_port; | |
1178 | sw->config.depth = tb_route_length(route); | |
1179 | sw->config.route_lo = route; | |
1180 | sw->config.route_hi = route >> 32; | |
bfe778ac | 1181 | sw->config.enabled = 0; |
a25c8b2f AN |
1182 | |
1183 | /* initialize ports */ | |
1184 | sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), | |
343fcb8c | 1185 | GFP_KERNEL); |
a25c8b2f | 1186 | if (!sw->ports) |
bfe778ac | 1187 | goto err_free_sw_ports; |
a25c8b2f AN |
1188 | |
1189 | for (i = 0; i <= sw->config.max_port_number; i++) { | |
343fcb8c AN |
1190 | /* minimum setup for tb_find_cap and tb_drom_read to work */ |
1191 | sw->ports[i].sw = sw; | |
1192 | sw->ports[i].port = i; | |
a25c8b2f AN |
1193 | } |
1194 | ||
2c3c4197 MW |
1195 | sw->generation = tb_switch_get_generation(sw); |
1196 | ||
da2da04b | 1197 | cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); |
ca389f71 | 1198 | if (cap < 0) { |
da2da04b | 1199 | tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); |
bfe778ac | 1200 | goto err_free_sw_ports; |
ca389f71 AN |
1201 | } |
1202 | sw->cap_plug_events = cap; | |
1203 | ||
f67cf491 MW |
1204 | /* Root switch is always authorized */ |
1205 | if (!route) | |
1206 | sw->authorized = true; | |
1207 | ||
bfe778ac MW |
1208 | device_initialize(&sw->dev); |
1209 | sw->dev.parent = parent; | |
1210 | sw->dev.bus = &tb_bus_type; | |
1211 | sw->dev.type = &tb_switch_type; | |
1212 | sw->dev.groups = switch_groups; | |
1213 | dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); | |
1214 | ||
1215 | return sw; | |
1216 | ||
1217 | err_free_sw_ports: | |
1218 | kfree(sw->ports); | |
1219 | kfree(sw); | |
1220 | ||
1221 | return NULL; | |
1222 | } | |
1223 | ||
e6b245cc MW |
1224 | /** |
1225 | * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode | |
1226 | * @tb: Pointer to the owning domain | |
1227 | * @parent: Parent device for this switch | |
1228 | * @route: Route string for this switch | |
1229 | * | |
1230 | * This creates a switch in safe mode. This means the switch pretty much | |
1231 | * lacks all capabilities except the DMA configuration port until it is |
1232 | * flashed with valid NVM firmware. |
1233 | * | |
1234 | * The returned switch must be released by calling tb_switch_put(). | |
1235 | * | |
1236 | * Return: Pointer to the allocated switch or %NULL in case of failure | |
1237 | */ | |
1238 | struct tb_switch * | |
1239 | tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) | |
1240 | { | |
1241 | struct tb_switch *sw; | |
1242 | ||
1243 | sw = kzalloc(sizeof(*sw), GFP_KERNEL); | |
1244 | if (!sw) | |
1245 | return NULL; | |
1246 | ||
1247 | sw->tb = tb; | |
1248 | sw->config.depth = tb_route_length(route); | |
1249 | sw->config.route_hi = upper_32_bits(route); | |
1250 | sw->config.route_lo = lower_32_bits(route); | |
1251 | sw->safe_mode = true; | |
1252 | ||
1253 | device_initialize(&sw->dev); | |
1254 | sw->dev.parent = parent; | |
1255 | sw->dev.bus = &tb_bus_type; | |
1256 | sw->dev.type = &tb_switch_type; | |
1257 | sw->dev.groups = switch_groups; | |
1258 | dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); | |
1259 | ||
1260 | return sw; | |
1261 | } | |
1262 | ||
bfe778ac MW |
1263 | /** |
1264 | * tb_switch_configure() - Uploads configuration to the switch | |
1265 | * @sw: Switch to configure | |
1266 | * | |
1267 | * Call this function before the switch is added to the system. It will | |
1268 | * upload the configuration to the switch and make it available for the |
1269 | * connection manager to use. | |
1270 | * | |
1271 | * Return: %0 in case of success and negative errno in case of failure | |
1272 | */ | |
1273 | int tb_switch_configure(struct tb_switch *sw) | |
1274 | { | |
1275 | struct tb *tb = sw->tb; | |
1276 | u64 route; | |
1277 | int ret; | |
1278 | ||
1279 | route = tb_route(sw); | |
daa5140f MW |
1280 | tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n", |
1281 | route, tb_route_length(route), sw->config.upstream_port_number); | |
bfe778ac MW |
1282 | |
1283 | if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) | |
1284 | tb_sw_warn(sw, "unknown switch vendor id %#x\n", | |
1285 | sw->config.vendor_id); | |
1286 | ||
bfe778ac MW |
1287 | sw->config.enabled = 1; |
1288 | ||
1289 | /* upload configuration */ | |
1290 | ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3); | |
1291 | if (ret) | |
1292 | return ret; | |
1293 | ||
1294 | return tb_plug_events_active(sw, true); | |
1295 | } | |
1296 | ||
1297 | static void tb_switch_set_uuid(struct tb_switch *sw) | |
1298 | { | |
1299 | u32 uuid[4]; | |
1300 | int cap; | |
1301 | ||
1302 | if (sw->uuid) | |
1303 | return; | |
1304 | ||
1305 | /* | |
1306 | * The newer controllers include fused UUID as part of link | |
1307 | * controller specific registers | |
1308 | */ | |
1309 | cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); | |
1310 | if (cap > 0) { | |
1311 | tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4); | |
1312 | } else { | |
1313 | /* | |
1314 | * ICM generates UUID based on UID and fills the upper | |
1315 | * two words with ones. This is not strictly following | |
1316 | * UUID format but we want to be compatible with it so | |
1317 | * we do the same here. | |
1318 | */ | |
1319 | uuid[0] = sw->uid & 0xffffffff; | |
1320 | uuid[1] = (sw->uid >> 32) & 0xffffffff; | |
1321 | uuid[2] = 0xffffffff; | |
1322 | uuid[3] = 0xffffffff; | |
1323 | } | |
1324 | ||
1325 | sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); | |
1326 | } | |
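The fallback branch above builds a pseudo-UUID straight from the 64-bit UID; a standalone sketch of that layout with an invented UID:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t uid = 0x0123456789abcdefULL;	/* hypothetical switch UID */
	uint32_t uuid[4];

	uuid[0] = uid & 0xffffffff;		/* low 32 bits of the UID */
	uuid[1] = (uid >> 32) & 0xffffffff;	/* high 32 bits of the UID */
	uuid[2] = 0xffffffff;			/* upper two words forced to all ones, */
	uuid[3] = 0xffffffff;			/* matching what the ICM reports */

	printf("uuid words: %08x %08x %08x %08x\n", uuid[0], uuid[1], uuid[2], uuid[3]);
	return 0;
}
```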
1327 | ||
e6b245cc | 1328 | static int tb_switch_add_dma_port(struct tb_switch *sw) |
3e136768 | 1329 | { |
e6b245cc MW |
1330 | u32 status; |
1331 | int ret; | |
1332 | ||
3e136768 MW |
1333 | switch (sw->generation) { |
1334 | case 3: | |
1335 | break; | |
1336 | ||
1337 | case 2: | |
1338 | /* Only root switch can be upgraded */ | |
1339 | if (tb_route(sw)) | |
e6b245cc | 1340 | return 0; |
3e136768 MW |
1341 | break; |
1342 | ||
1343 | default: | |
e6b245cc MW |
1344 | /* |
1345 | * DMA port is the only thing available when the switch | |
1346 | * is in safe mode. | |
1347 | */ | |
1348 | if (!sw->safe_mode) | |
1349 | return 0; | |
1350 | break; | |
3e136768 MW |
1351 | } |
1352 | ||
e6b245cc MW |
1353 | if (sw->no_nvm_upgrade) |
1354 | return 0; | |
1355 | ||
3e136768 | 1356 | sw->dma_port = dma_port_alloc(sw); |
e6b245cc MW |
1357 | if (!sw->dma_port) |
1358 | return 0; | |
1359 | ||
1360 | /* | |
1361 | * Check status of the previous flash authentication. If there | |
1362 | * is one we need to power cycle the switch in any case to make | |
1363 | * it functional again. | |
1364 | */ | |
1365 | ret = dma_port_flash_update_auth_status(sw->dma_port, &status); | |
1366 | if (ret <= 0) | |
1367 | return ret; | |
1368 | ||
1830b6ee MW |
1369 | /* Now we can allow root port to suspend again */ |
1370 | if (!tb_route(sw)) | |
1371 | nvm_authenticate_complete(sw); | |
1372 | ||
e6b245cc MW |
1373 | if (status) { |
1374 | tb_sw_info(sw, "switch flash authentication failed\n"); | |
1375 | tb_switch_set_uuid(sw); | |
1376 | nvm_set_auth_status(sw, status); | |
1377 | } | |
1378 | ||
1379 | tb_sw_info(sw, "power cycling the switch now\n"); | |
1380 | dma_port_power_cycle(sw->dma_port); | |
1381 | ||
1382 | /* | |
1383 | * We return an error here, which causes the switch add to fail. The |
1384 | * switch should appear back after the power cycle is complete. |
1385 | */ | |
1386 | return -ESHUTDOWN; | |
3e136768 MW |
1387 | } |
1388 | ||
bfe778ac MW |
1389 | /** |
1390 | * tb_switch_add() - Add a switch to the domain | |
1391 | * @sw: Switch to add | |
1392 | * | |
1393 | * This is the last step in adding switch to the domain. It will read | |
1394 | * identification information from DROM and initializes ports so that | |
1395 | * they can be used to connect other switches. The switch will be | |
1396 | * exposed to the userspace when this function successfully returns. To | |
1397 | * remove and release the switch, call tb_switch_remove(). | |
1398 | * | |
1399 | * Return: %0 in case of success and negative errno in case of failure | |
1400 | */ | |
1401 | int tb_switch_add(struct tb_switch *sw) | |
1402 | { | |
1403 | int i, ret; | |
1404 | ||
3e136768 MW |
1405 | /* |
1406 | * Initialize DMA control port now before we read DROM. Recent | |
1407 | * host controllers have a more complete DROM in NVM that includes |
1408 | * vendor and model identification strings which we then expose |
1409 | * to userspace. The NVM can be accessed through the DMA |
1410 | * configuration based mailbox. |
1411 | */ | |
e6b245cc MW |
1412 | ret = tb_switch_add_dma_port(sw); |
1413 | if (ret) | |
f53e7676 | 1414 | return ret; |
343fcb8c | 1415 | |
e6b245cc MW |
1416 | if (!sw->safe_mode) { |
1417 | /* read drom */ | |
1418 | ret = tb_drom_read(sw); | |
1419 | if (ret) { | |
1420 | tb_sw_warn(sw, "tb_eeprom_read_rom failed\n"); | |
1421 | return ret; | |
1422 | } | |
daa5140f | 1423 | tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); |
bfe778ac | 1424 | |
e6b245cc MW |
1425 | tb_switch_set_uuid(sw); |
1426 | ||
1427 | for (i = 0; i <= sw->config.max_port_number; i++) { | |
1428 | if (sw->ports[i].disabled) { | |
daa5140f | 1429 | tb_port_dbg(&sw->ports[i], "disabled by eeprom\n"); |
e6b245cc MW |
1430 | continue; |
1431 | } | |
1432 | ret = tb_init_port(&sw->ports[i]); | |
1433 | if (ret) | |
1434 | return ret; | |
343fcb8c | 1435 | } |
343fcb8c AN |
1436 | } |
1437 | ||
e6b245cc MW |
1438 | ret = device_add(&sw->dev); |
1439 | if (ret) | |
1440 | return ret; | |
1441 | ||
a83bc4a5 MW |
1442 | if (tb_route(sw)) { |
1443 | dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", | |
1444 | sw->vendor, sw->device); | |
1445 | if (sw->vendor_name && sw->device_name) | |
1446 | dev_info(&sw->dev, "%s %s\n", sw->vendor_name, | |
1447 | sw->device_name); | |
1448 | } | |
1449 | ||
e6b245cc | 1450 | ret = tb_switch_nvm_add(sw); |
2d8ff0b5 | 1451 | if (ret) { |
e6b245cc | 1452 | device_del(&sw->dev); |
2d8ff0b5 MW |
1453 | return ret; |
1454 | } | |
e6b245cc | 1455 | |
2d8ff0b5 MW |
1456 | pm_runtime_set_active(&sw->dev); |
1457 | if (sw->rpm) { | |
1458 | pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); | |
1459 | pm_runtime_use_autosuspend(&sw->dev); | |
1460 | pm_runtime_mark_last_busy(&sw->dev); | |
1461 | pm_runtime_enable(&sw->dev); | |
1462 | pm_request_autosuspend(&sw->dev); | |
1463 | } | |
1464 | ||
1465 | return 0; | |
bfe778ac | 1466 | } |
c90553b3 | 1467 | |
bfe778ac MW |
1468 | /** |
1469 | * tb_switch_remove() - Remove and release a switch | |
1470 | * @sw: Switch to remove | |
1471 | * | |
1472 | * This will remove the switch from the domain and release it once the |
1473 | * last reference to it is dropped. If there are switches connected below |
1474 | * this switch, they will be removed as well. | |
1475 | */ | |
1476 | void tb_switch_remove(struct tb_switch *sw) | |
1477 | { | |
1478 | int i; | |
ca389f71 | 1479 | |
2d8ff0b5 MW |
1480 | if (sw->rpm) { |
1481 | pm_runtime_get_sync(&sw->dev); | |
1482 | pm_runtime_disable(&sw->dev); | |
1483 | } | |
1484 | ||
bfe778ac MW |
1485 | /* port 0 is the switch itself and never has a remote */ |
1486 | for (i = 1; i <= sw->config.max_port_number; i++) { | |
1487 | if (tb_is_upstream_port(&sw->ports[i])) | |
1488 | continue; | |
1489 | if (sw->ports[i].remote) | |
1490 | tb_switch_remove(sw->ports[i].remote->sw); | |
1491 | sw->ports[i].remote = NULL; | |
d1ff7024 MW |
1492 | if (sw->ports[i].xdomain) |
1493 | tb_xdomain_remove(sw->ports[i].xdomain); | |
1494 | sw->ports[i].xdomain = NULL; | |
bfe778ac MW |
1495 | } |
1496 | ||
1497 | if (!sw->is_unplugged) | |
1498 | tb_plug_events_active(sw, false); | |
1499 | ||
e6b245cc | 1500 | tb_switch_nvm_remove(sw); |
a83bc4a5 MW |
1501 | |
1502 | if (tb_route(sw)) | |
1503 | dev_info(&sw->dev, "device disconnected\n"); | |
bfe778ac | 1504 | device_unregister(&sw->dev); |
a25c8b2f AN |
1505 | } |
1506 | ||
053596d9 | 1507 | /** |
aae20bb6 | 1508 | * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches |
053596d9 | 1509 | */ |
aae20bb6 | 1510 | void tb_sw_set_unplugged(struct tb_switch *sw) |
053596d9 AN |
1511 | { |
1512 | int i; | |
1513 | if (sw == sw->tb->root_switch) { | |
1514 | tb_sw_WARN(sw, "cannot unplug root switch\n"); | |
1515 | return; | |
1516 | } | |
1517 | if (sw->is_unplugged) { | |
1518 | tb_sw_WARN(sw, "is_unplugged already set\n"); | |
1519 | return; | |
1520 | } | |
1521 | sw->is_unplugged = true; | |
1522 | for (i = 0; i <= sw->config.max_port_number; i++) { | |
1523 | if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote) | |
aae20bb6 | 1524 | tb_sw_set_unplugged(sw->ports[i].remote->sw); |
053596d9 AN |
1525 | } |
1526 | } | |
1527 | ||
23dd5bb4 AN |
1528 | int tb_switch_resume(struct tb_switch *sw) |
1529 | { | |
1530 | int i, err; | |
daa5140f | 1531 | tb_sw_dbg(sw, "resuming switch\n"); |
23dd5bb4 | 1532 | |
08a5e4ce MW |
1533 | /* |
1534 | * Check the UID of the connected switches except for the root |
1535 | * switch, which we assume cannot be removed. |
1536 | */ | |
1537 | if (tb_route(sw)) { | |
1538 | u64 uid; | |
1539 | ||
1540 | err = tb_drom_read_uid_only(sw, &uid); | |
1541 | if (err) { | |
1542 | tb_sw_warn(sw, "uid read failed\n"); | |
1543 | return err; | |
1544 | } | |
1545 | if (sw->uid != uid) { | |
1546 | tb_sw_info(sw, | |
1547 | "changed while suspended (uid %#llx -> %#llx)\n", | |
1548 | sw->uid, uid); | |
1549 | return -ENODEV; | |
1550 | } | |
23dd5bb4 AN |
1551 | } |
1552 | ||
1553 | /* upload configuration */ | |
1554 | err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3); | |
1555 | if (err) | |
1556 | return err; | |
1557 | ||
1558 | err = tb_plug_events_active(sw, true); | |
1559 | if (err) | |
1560 | return err; | |
1561 | ||
1562 | /* check for surviving downstream switches */ | |
1563 | for (i = 1; i <= sw->config.max_port_number; i++) { | |
1564 | struct tb_port *port = &sw->ports[i]; | |
1565 | if (tb_is_upstream_port(port)) | |
1566 | continue; | |
1567 | if (!port->remote) | |
1568 | continue; | |
1569 | if (tb_wait_for_port(port, true) <= 0 | |
1570 | || tb_switch_resume(port->remote->sw)) { | |
1571 | tb_port_warn(port, | |
1572 | "lost during suspend, disconnecting\n"); | |
aae20bb6 | 1573 | tb_sw_set_unplugged(port->remote->sw); |
23dd5bb4 AN |
1574 | } |
1575 | } | |
1576 | return 0; | |
1577 | } | |
1578 | ||
1579 | void tb_switch_suspend(struct tb_switch *sw) | |
1580 | { | |
1581 | int i, err; | |
1582 | err = tb_plug_events_active(sw, false); | |
1583 | if (err) | |
1584 | return; | |
1585 | ||
1586 | for (i = 1; i <= sw->config.max_port_number; i++) { | |
1587 | if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote) | |
1588 | tb_switch_suspend(sw->ports[i].remote->sw); | |
1589 | } | |
1590 | /* | |
1591 | * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any | |
1592 | * effect? | |
1593 | */ | |
1594 | } | |
f67cf491 MW |
1595 | |
1596 | struct tb_sw_lookup { | |
1597 | struct tb *tb; | |
1598 | u8 link; | |
1599 | u8 depth; | |
7c39ffe7 | 1600 | const uuid_t *uuid; |
8e9267bb | 1601 | u64 route; |
f67cf491 MW |
1602 | }; |
1603 | ||
1604 | static int tb_switch_match(struct device *dev, void *data) | |
1605 | { | |
1606 | struct tb_switch *sw = tb_to_switch(dev); | |
1607 | struct tb_sw_lookup *lookup = data; | |
1608 | ||
1609 | if (!sw) | |
1610 | return 0; | |
1611 | if (sw->tb != lookup->tb) | |
1612 | return 0; | |
1613 | ||
1614 | if (lookup->uuid) | |
1615 | return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); | |
1616 | ||
8e9267bb RM |
1617 | if (lookup->route) { |
1618 | return sw->config.route_lo == lower_32_bits(lookup->route) && | |
1619 | sw->config.route_hi == upper_32_bits(lookup->route); | |
1620 | } | |
1621 | ||
f67cf491 MW |
1622 | /* Root switch is matched only by depth */ |
1623 | if (!lookup->depth) | |
1624 | return !sw->depth; | |
1625 | ||
1626 | return sw->link == lookup->link && sw->depth == lookup->depth; | |
1627 | } | |
1628 | ||
1629 | /** | |
1630 | * tb_switch_find_by_link_depth() - Find switch by link and depth | |
1631 | * @tb: Domain the switch belongs | |
1632 | * @link: Link number the switch is connected | |
1633 | * @depth: Depth of the switch in link | |
1634 | * | |
1635 | * Returned switch has reference count increased so the caller needs to | |
1636 | * call tb_switch_put() when done with the switch. | |
1637 | */ | |
1638 | struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) | |
1639 | { | |
1640 | struct tb_sw_lookup lookup; | |
1641 | struct device *dev; | |
1642 | ||
1643 | memset(&lookup, 0, sizeof(lookup)); | |
1644 | lookup.tb = tb; | |
1645 | lookup.link = link; | |
1646 | lookup.depth = depth; | |
1647 | ||
1648 | dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); | |
1649 | if (dev) | |
1650 | return tb_to_switch(dev); | |
1651 | ||
1652 | return NULL; | |
1653 | } | |
1654 | ||
1655 | /** | |
432019d6 | 1656 | * tb_switch_find_by_uuid() - Find switch by UUID |
f67cf491 MW |
1657 | * @tb: Domain the switch belongs |
1658 | * @uuid: UUID to look for | |
1659 | * | |
1660 | * Returned switch has reference count increased so the caller needs to | |
1661 | * call tb_switch_put() when done with the switch. | |
1662 | */ | |
7c39ffe7 | 1663 | struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) |
f67cf491 MW |
1664 | { |
1665 | struct tb_sw_lookup lookup; | |
1666 | struct device *dev; | |
1667 | ||
1668 | memset(&lookup, 0, sizeof(lookup)); | |
1669 | lookup.tb = tb; | |
1670 | lookup.uuid = uuid; | |
1671 | ||
1672 | dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); | |
1673 | if (dev) | |
1674 | return tb_to_switch(dev); | |
1675 | ||
1676 | return NULL; | |
1677 | } | |
e6b245cc | 1678 | |
8e9267bb RM |
1679 | /** |
1680 | * tb_switch_find_by_route() - Find switch by route string | |
1681 | * @tb: Domain the switch belongs | |
1682 | * @route: Route string to look for | |
1683 | * | |
1684 | * Returned switch has reference count increased so the caller needs to | |
1685 | * call tb_switch_put() when done with the switch. | |
1686 | */ | |
1687 | struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) | |
1688 | { | |
1689 | struct tb_sw_lookup lookup; | |
1690 | struct device *dev; | |
1691 | ||
1692 | if (!route) | |
1693 | return tb_switch_get(tb->root_switch); | |
1694 | ||
1695 | memset(&lookup, 0, sizeof(lookup)); | |
1696 | lookup.tb = tb; | |
1697 | lookup.route = route; | |
1698 | ||
1699 | dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); | |
1700 | if (dev) | |
1701 | return tb_to_switch(dev); | |
1702 | ||
1703 | return NULL; | |
1704 | } | |
1705 | ||
e6b245cc MW |
1706 | void tb_switch_exit(void) |
1707 | { | |
1708 | ida_destroy(&nvm_ida); | |
1709 | } |