/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

static struct vfio {
	struct class		*class;
	struct list_head	iommu_drivers_list;
	struct mutex		iommu_drivers_lock;
	struct list_head	group_list;
	struct idr		group_idr;
	struct mutex		group_lock;
	struct cdev		group_cdev;
	struct device		*dev;
	dev_t			devt;
	struct cdev		cdev;
	wait_queue_head_t	release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct mutex			group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};
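
/*
 * Object relationships, as implemented below: a vfio_container is created
 * when /dev/vfio/vfio is opened and collects one or more vfio_groups on its
 * group_list; each vfio_group mirrors an iommu_group and collects the
 * vfio_devices registered for that group on its device_list.  A vfio_device
 * holds a reference on its group, a group holds a reference on its container
 * while attached, and container_users counts the group's container
 * attachment plus every open device fd; when it drops to zero the group
 * detaches and, if it was the last group, the IOMMU backend is released.
 */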

/**
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
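
/*
 * Illustrative sketch (not part of this file): an IOMMU backend module,
 * such as vfio_iommu_type1, registers a vfio_iommu_driver_ops at module
 * init and unregisters it on exit.  Only the callbacks actually used by
 * this core are shown; the "my_*" names are hypothetical.
 *
 *	static const struct vfio_iommu_driver_ops my_iommu_ops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_iommu_open,	// per-container state
 *		.release	= my_iommu_release,
 *		.ioctl		= my_iommu_ioctl,	// incl. VFIO_CHECK_EXTENSION
 *		.attach_group	= my_iommu_attach_group,
 *		.detach_group	= my_iommu_detach_group,
 *	};
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return vfio_register_iommu_driver(&my_iommu_ops);
 *	}
 *
 *	static void __exit my_iommu_exit(void)
 *	{
 *		vfio_unregister_iommu_driver(&my_iommu_ops);
 *	}
 */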

/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	int ret, minor;

again:
	if (unlikely(idr_pre_get(&vfio.group_idr, GFP_KERNEL) == 0))
		return -ENOMEM;

	/* index 0 is used by /dev/vfio/vfio */
	ret = idr_get_new_above(&vfio.group_idr, group, 1, &minor);
	if (ret == -EAGAIN)
		goto again;
	if (ret || minor > MINORMASK) {
		if (minor > MINORMASK)
			idr_remove(&vfio.group_idr, minor);
		return -ENOSPC;
	}

	return minor;
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}
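
/*
 * Note: the two-step idr_pre_get()/idr_get_new_above() API used above can
 * find its preallocated layers consumed before the allocation happens, which
 * is why -EAGAIN loops back to "again".  Minors start at 1 because minor 0
 * belongs to the container node, /dev/vfio/vfio, and anything beyond
 * MINORMASK is backed out and reported as -ENOSPC.
 */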

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/**
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * they're freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

/**
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	atomic_set(&group->container_users, 0);
	group->iommu_group = iommu_group;

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		mutex_unlock(&vfio.group_lock);
		kfree(group);
		return ERR_PTR(minor);
	}

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_free_group_minor(minor);
			mutex_unlock(&vfio.group_lock);
			kfree(group);
			return tmp;
		}
	}

	dev = device_create(vfio.class, NULL, MKDEV(MAJOR(vfio.devt), minor),
			    group, "%d", iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		mutex_unlock(&vfio.group_lock);
		kfree(group);
		return (struct vfio_group *)dev; /* ERR_PTR */
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}
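
/*
 * Note: two devices from the same iommu_group may be registered concurrently,
 * so vfio_create_group() re-checks vfio.group_list under vfio.group_lock and
 * returns the already-created group (with an extra reference) if it lost the
 * race.  The group chardev is named after the iommu group number, so it
 * appears as /dev/vfio/$GROUP via the class devnode callback near the end of
 * this file.
 */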

static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);

	WARN_ON(!list_empty(&group->device_list));

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);

	mutex_unlock(&vfio.group_lock);

	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);

	kfree(group);
}

static void vfio_group_put(struct vfio_group *group)
{
	mutex_lock(&vfio.group_lock);
	/*
	 * Release needs to unlock to unregister the notifier, so only
	 * unlock if not released.
	 */
	if (!kref_put(&group->kref, vfio_group_release))
		mutex_unlock(&vfio.group_lock);
}
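
/*
 * Note: the final kref_put() above runs with vfio.group_lock held so that a
 * concurrent lookup (vfio_group_try_get, vfio_group_get_from_iommu, or the
 * idr search) can never find a group whose teardown has already begun;
 * vfio_group_release() drops the lock itself once the group is off the list.
 */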

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

/**
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;
	int ret;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;

	ret = dev_set_drvdata(dev, device);
	if (ret) {
		kfree(device);
		return ERR_PTR(ret);
	}

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	mutex_lock(&group->device_lock);
	list_del(&device->group_next);
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
static void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put(&device->kref, vfio_device_release);
	vfio_group_put(group);
}

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}
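
/*
 * Note: vfio_group_create_device() stashes the vfio_device pointer in the
 * struct device's drvdata (dev_set_drvdata), which is how vfio_del_group_dev()
 * later recovers the tracking structure from nothing but the struct device.
 * The return-value check on dev_set_drvdata() reflects the kernel API at the
 * time this was written, where the call could fail and return an error.
 */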

/*
 * Whitelist some drivers that we know are safe (no dma) or just sit on
 * a device.  It's not always practical to leave a device within a group
 * driverless as it could get re-bound to something unsafe.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub" };

static bool vfio_whitelisted_driver(struct device_driver *drv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) {
		if (!strcmp(drv->name, vfio_driver_whitelist[i]))
			return true;
	}

	return false;
}

/*
 * A vfio group is viable for use by userspace if all devices are either
 * driver-less or bound to a vfio or whitelisted driver.  We test the
 * latter by the existence of a struct vfio_device matching the dev.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;

	if (!dev->driver || vfio_whitelisted_driver(dev->driver))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return -EINVAL;
}
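
/*
 * Example of the viability rule above: a group whose devices are all either
 * unbound, bound to a whitelisted driver such as pci-stub, or bound to a
 * vfio bus driver (and therefore tracked as vfio_devices) is viable; a
 * single device in the group claimed by any other host driver makes the
 * whole group unusable from userspace.
 */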

/**
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
	     iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_del_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/*
	 * Expect to fall out here.  If a device was in use, it would
	 * have been bound to a vfio sub-driver, which would have blocked
	 * in .remove at vfio_del_group_dev.  Sanity check that we no
	 * longer track the device, so it's safe to remove.
	 */
	device = vfio_group_get_device(group, dev);
	if (likely(!device))
		return 0;

	WARN(1, "Device %s removed from live group %d!\n", dev_name(dev),
	     iommu_group_id(group->iommu_group));

	vfio_device_put(device);
	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;

	/*
	 * Need to go through a group_lock lookup to get a reference or
	 * we risk racing a group being removed.  Leave a WARN_ON for
	 * debugging, but if the group no longer exists, a spurious notify
	 * is harmless.
	 */
	group = vfio_group_try_get(group);
	if (WARN_ON(!group))
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		vfio_group_nb_del_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		pr_debug("%s: Device %s, group %d binding to driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		pr_debug("%s: Device %s, group %d bound to driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		pr_debug("%s: Device %s, group %d unbound from driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it.  Once that occurs, we have to
		 * stop the system to maintain isolation.  At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */
		break;
	}

	vfio_group_put(group);
	return NOTIFY_OK;
}

/**
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		WARN(1, "Device %s already exists on group %d\n",
		     dev_name(dev), iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		iommu_group_put(iommu_group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		iommu_group_put(iommu_group);
		return PTR_ERR(device);
	}

	/*
	 * Added device holds reference to iommu_group and vfio_device
	 * (which in turn holds reference to vfio_group).  Drop extra
	 * group reference used while acquiring device.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
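
/*
 * Illustrative sketch (not part of this file): a vfio bus driver, e.g.
 * vfio-pci, registers each device it probes and unregisters it on remove.
 * The "my_*" names are hypothetical; only the vfio_device_ops callbacks
 * used by this core are shown.
 *
 *	static const struct vfio_device_ops my_device_ops = {
 *		.open		= my_open,
 *		.release	= my_release,
 *		.ioctl		= my_ioctl,
 *		.read		= my_read,
 *		.write		= my_write,
 *		.mmap		= my_mmap,
 *	};
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct my_device *mydev = my_device_alloc(dev);
 *
 *		return vfio_add_group_dev(dev, &my_device_ops, mydev);
 *	}
 *
 *	static void my_remove(struct device *dev)
 *	{
 *		// blocks until all fds referencing the device are closed
 *		struct my_device *mydev = vfio_del_group_dev(dev);
 *
 *		my_device_free(mydev);
 *	}
 */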

/* Test whether a struct device is present in our tracking */
static bool vfio_dev_present(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return false;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		iommu_group_put(iommu_group);
		return false;
	}

	device = vfio_group_get_device(group, dev);
	if (!device) {
		vfio_group_put(group);
		iommu_group_put(iommu_group);
		return false;
	}

	vfio_device_put(device);
	vfio_group_put(group);
	iommu_group_put(iommu_group);
	return true;
}

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	struct iommu_group *iommu_group = group->iommu_group;
	void *device_data = device->device_data;

	vfio_device_put(device);

	/* TODO send a signal to encourage this to be released */
	wait_event(vfio.release_q, !vfio_dev_present(dev));

	iommu_group_put(iommu_group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);

/**
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver = container->iommu_driver;
	long ret = 0;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result.  If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {
				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	return ret;
}

/* hold container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}

static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	mutex_lock(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users.  Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources.  There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		mutex_unlock(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them.  We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		/* module reference holds the driver we're working on */
		mutex_unlock(&vfio.iommu_drivers_lock);

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			goto skip_drivers_unlock;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (!ret) {
			container->iommu_driver = driver;
			container->iommu_data = data;
		} else {
			driver->ops->release(data);
			module_put(driver->ops->owner);
		}

		goto skip_drivers_unlock;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
skip_drivers_unlock:
	mutex_unlock(&container->group_lock);

	return ret;
}

static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	driver = container->iommu_driver;
	data = container->iommu_data;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	mutex_init(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}

/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver = container->iommu_driver;

	if (unlikely(!driver || !driver->ops->read))
		return -EINVAL;

	return driver->ops->read(container->iommu_data, buf, count, ppos);
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver = container->iommu_driver;

	if (unlikely(!driver || !driver->ops->write))
		return -EINVAL;

	return driver->ops->write(container->iommu_data, buf, count, ppos);
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver = container->iommu_driver;

	if (unlikely(!driver || !driver->ops->mmap))
		return -EINVAL;

	return driver->ops->mmap(container->iommu_data, vma);
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_fops_compat_ioctl,
#endif
	.mmap		= vfio_fops_mmap,
};

/**
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	mutex_lock(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	mutex_unlock(&container->group_lock);

	vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}

static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct file *filep;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	filep = fget(container_fd);
	if (!filep)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (filep->f_op != &vfio_fops) {
		fput(filep);
		return -EINVAL;
	}

	container = filep->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	mutex_lock(&container->group_lock);

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	mutex_unlock(&container->group_lock);
	fput(filep);

	return ret;
}
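
/*
 * Note: the container fd supplied by userspace is validated by comparing
 * filep->f_op against our own vfio_fops, the usual pattern for making sure
 * a caller-provided file descriptor really refers to this interface before
 * its private_data is trusted.  The fget()/fput() pair keeps the container
 * file from going away while the group is being attached.
 */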

static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret = -ENODEV;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (strcmp(dev_name(device->dev), buf))
			continue;

		ret = device->ops->open(device->device_data);
		if (ret)
			break;
		/*
		 * We can't use anon_inode_getfd() because we need to modify
		 * the f_mode flags directly to allow more than just ioctls
		 */
		ret = get_unused_fd();
		if (ret < 0) {
			device->ops->release(device->device_data);
			break;
		}

		filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
					   device, O_RDWR);
		if (IS_ERR(filep)) {
			put_unused_fd(ret);
			ret = PTR_ERR(filep);
			device->ops->release(device->device_data);
			break;
		}

		/*
		 * TODO: add an anon_inode interface to do this.
		 * Appears to be missing by lack of need rather than
		 * explicitly prevented.  Now there's need.
		 */
		filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

		fd_install(ret, filep);

		vfio_device_get(device);
		atomic_inc(&group->container_users);
		break;
	}
	mutex_unlock(&group->device_lock);

	return ret;
}
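
/*
 * Note: each device fd handed out above takes its own vfio_device reference
 * and bumps group->container_users, so the group stays attached to its
 * container (and the IOMMU backend stays alive) until the group fd and every
 * device fd derived from it have been closed; see
 * vfio_group_try_dissolve_container() and vfio_device_fops_release().
 */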

static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_group_fops_compat_ioctl(struct file *filep,
					 unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_group_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	if (group->container) {
		vfio_group_put(group);
		return -EBUSY;
	}

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_group_fops_compat_ioctl,
#endif
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};
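
/*
 * Illustrative userspace flow (a sketch, not part of this file), tying the
 * container and group ioctls above together.  The group number, the
 * VFIO_TYPE1_IOMMU extension, and the device name are examples; the
 * available extension and device names depend on the registered IOMMU
 * backend and vfio bus driver.
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	// proceed only if status.flags & VFIO_GROUP_FLAGS_VIABLE
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */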

/**
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}

#ifdef CONFIG_COMPAT
static long vfio_device_fops_compat_ioctl(struct file *filep,
					  unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_device_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_device_fops_compat_ioctl,
#endif
	.mmap		= vfio_device_fops_mmap,
};

/**
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}
1337 | static int __init vfio_init(void) | |
1338 | { | |
1339 | int ret; | |
1340 | ||
1341 | idr_init(&vfio.group_idr); | |
1342 | mutex_init(&vfio.group_lock); | |
1343 | mutex_init(&vfio.iommu_drivers_lock); | |
1344 | INIT_LIST_HEAD(&vfio.group_list); | |
1345 | INIT_LIST_HEAD(&vfio.iommu_drivers_list); | |
1346 | init_waitqueue_head(&vfio.release_q); | |
1347 | ||
1348 | vfio.class = class_create(THIS_MODULE, "vfio"); | |
1349 | if (IS_ERR(vfio.class)) { | |
1350 | ret = PTR_ERR(vfio.class); | |
1351 | goto err_class; | |
1352 | } | |
1353 | ||
1354 | vfio.class->devnode = vfio_devnode; | |
1355 | ||
1356 | ret = alloc_chrdev_region(&vfio.devt, 0, MINORMASK, "vfio"); | |
1357 | if (ret) | |
1358 | goto err_base_chrdev; | |
1359 | ||
1360 | cdev_init(&vfio.cdev, &vfio_fops); | |
1361 | ret = cdev_add(&vfio.cdev, vfio.devt, 1); | |
1362 | if (ret) | |
1363 | goto err_base_cdev; | |
1364 | ||
1365 | vfio.dev = device_create(vfio.class, NULL, vfio.devt, NULL, "vfio"); | |
1366 | if (IS_ERR(vfio.dev)) { | |
1367 | ret = PTR_ERR(vfio.dev); | |
1368 | goto err_base_dev; | |
1369 | } | |
1370 | ||
1371 | /* /dev/vfio/$GROUP */ | |
1372 | cdev_init(&vfio.group_cdev, &vfio_group_fops); | |
1373 | ret = cdev_add(&vfio.group_cdev, | |
1374 | MKDEV(MAJOR(vfio.devt), 1), MINORMASK - 1); | |
1375 | if (ret) | |
1376 | goto err_groups_cdev; | |
1377 | ||
1378 | pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); | |
1379 | ||
73fa0d10 AW |
1380 | /* |
1381 | * Attempt to load known iommu-drivers. This gives us a working | |
1382 | * environment without the user needing to explicitly load iommu | |
1383 | * drivers. | |
1384 | */ | |
1385 | request_module_nowait("vfio_iommu_type1"); | |
1386 | ||
cba3345c AW |
1387 | return 0; |
1388 | ||
1389 | err_groups_cdev: | |
1390 | device_destroy(vfio.class, vfio.devt); | |
1391 | err_base_dev: | |
1392 | cdev_del(&vfio.cdev); | |
1393 | err_base_cdev: | |
1394 | unregister_chrdev_region(vfio.devt, MINORMASK); | |
1395 | err_base_chrdev: | |
1396 | class_destroy(vfio.class); | |
1397 | vfio.class = NULL; | |
1398 | err_class: | |
1399 | return ret; | |
1400 | } | |
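
/*
 * Note: a single dynamically allocated char major is shared by both cdevs
 * registered above: minor 0 backs the container node (/dev/vfio/vfio,
 * served by vfio_fops) and minors 1 and up back the per-group nodes
 * (/dev/vfio/$GROUP, served by vfio_group_fops), which is why
 * vfio_alloc_group_minor() starts handing out group minors at 1.
 */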

static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	device_destroy(vfio.class, vfio.devt);
	cdev_del(&vfio.cdev);
	unregister_chrdev_region(vfio.devt, MINORMASK);
	class_destroy(vfio.class);
	vfio.class = NULL;
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);