// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

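/*
 * Walk the driver's id_table and return the first entry that matches
 * the given service, or NULL if the device is not a Thunderbolt service
 * or nothing matches.
 */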
static const struct tb_service_id *__tb_service_match(struct device *dev,
						       struct device_driver *drv)
{
	struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}

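/*
 * Bus ->probe callback: look up the matching id table entry again and
 * hand it to the service driver's ->probe().
 */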
static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);

	return 0;
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}

static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
};

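/*
 * boot_acl shows the pre-boot ACL as a comma-separated list of
 * tb->nboot_acl entries. Slots holding a null UUID are shown as empty
 * fields.
 */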
static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
					&uuids[i]);

		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
				i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}

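/*
 * Writing boot_acl expects exactly tb->nboot_acl comma-separated
 * fields. Each field is either a full UUID string or empty, the latter
 * leaving the corresponding ACL slot as a null UUID.
 */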
static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". The smallest allowable
	 * string is tb->nboot_acl - 1 commas (all entries empty).
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	const char *name = "unknown";

	if (tb->security_level < ARRAY_SIZE(tb_security_names))
		name = tb_security_names[tb->security_level];

	return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_security.attr,
	NULL,
};

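/*
 * boot_acl is only exposed when the connection manager implements both
 * ->get_boot_acl() and ->set_boot_acl() and the domain supports a
 * non-zero number of ACL entries.
 */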
static umode_t domain_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb *tb = container_of(dev, struct tb, dev);

	if (attr == &dev_attr_boot_acl.attr) {
		if (tb->nboot_acl &&
		    tb->cm_ops->get_boot_acl &&
		    tb->cm_ops->set_boot_acl)
			return attr->mode;
		return 0;
	}

	return attr->mode;
}

static struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes map to what the hardware
	 * expects because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}

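/*
 * Control channel callback: XDomain request/response packets are routed
 * to the XDomain code, everything else is passed to the connection
 * manager's ->handle_event() callback.
 */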
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		return tb_xdomain_handle_request(tb, type, buf, size);

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to
 * remove and release the domain after this function has been called,
 * call tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
	if (!tb->ctl) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
err_unlock:
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

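/*
 * System sleep suspend and complete handling is simply delegated to the
 * connection manager, if it implements the callbacks.
 */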
int tb_domain_suspend(struct tb *tb)
{
	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

void tb_domain_complete(struct tb *tb)
{
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
	if (tb->cm_ops->runtime_suspend) {
		int ret = tb->cm_ops->runtime_suspend(tb);
		if (ret)
			return ret;
	}
	tb_ctl_stop(tb->ctl);
	return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->runtime_resume) {
		int ret = tb->cm_ops->runtime_resume(tb);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means. In
 * case of success the connection manager will create tunnels for all
 * supported protocols.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds
 * the key to the switch NVM using connection manager specific means. If
 * adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches our random challenge, the switch is
 * approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;
	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}

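/*
 * bus_for_each_dev() callback that disables the DMA paths of each
 * XDomain belonging to the given domain.
 */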
static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
	int ret;

	ret = tb_xdomain_init();
	if (ret)
		return ret;
	ret = bus_register(&tb_bus_type);
	if (ret)
		tb_xdomain_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_switch_exit();
	tb_xdomain_exit();
}