1/*
2 * VFIO core
3 *
4 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
5 * Author: Alex Williamson <alex.williamson@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Derived from original vfio:
12 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
13 * Author: Tom Lyon, pugs@cisco.com
14 */
15
16#include <linux/cdev.h>
17#include <linux/compat.h>
18#include <linux/device.h>
19#include <linux/file.h>
20#include <linux/anon_inodes.h>
21#include <linux/fs.h>
22#include <linux/idr.h>
23#include <linux/iommu.h>
24#include <linux/list.h>
25#include <linux/miscdevice.h>
26#include <linux/module.h>
27#include <linux/mutex.h>
28#include <linux/pci.h>
29#include <linux/rwsem.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/stat.h>
33#include <linux/string.h>
34#include <linux/uaccess.h>
35#include <linux/vfio.h>
36#include <linux/wait.h>
37
38#define DRIVER_VERSION "0.3"
39#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
40#define DRIVER_DESC "VFIO - User Level meta-driver"
41
42static struct vfio {
43 struct class *class;
44 struct list_head iommu_drivers_list;
45 struct mutex iommu_drivers_lock;
46 struct list_head group_list;
47 struct idr group_idr;
48 struct mutex group_lock;
49 struct cdev group_cdev;
50 dev_t group_devt;
51 wait_queue_head_t release_q;
52} vfio;
53
54struct vfio_iommu_driver {
55 const struct vfio_iommu_driver_ops *ops;
56 struct list_head vfio_next;
57};
58
59struct vfio_container {
60 struct kref kref;
61 struct list_head group_list;
62 struct rw_semaphore group_lock;
63 struct vfio_iommu_driver *iommu_driver;
64 void *iommu_data;
65 bool noiommu;
66};
67
68struct vfio_unbound_dev {
69 struct device *dev;
70 struct list_head unbound_next;
71};
72
73struct vfio_group {
74 struct kref kref;
75 int minor;
76 atomic_t container_users;
77 struct iommu_group *iommu_group;
78 struct vfio_container *container;
79 struct list_head device_list;
80 struct mutex device_lock;
81 struct device *dev;
82 struct notifier_block nb;
83 struct list_head vfio_next;
84 struct list_head container_next;
85 struct list_head unbound_list;
86 struct mutex unbound_lock;
87 atomic_t opened;
88 bool noiommu;
89};
90
91struct vfio_device {
92 struct kref kref;
93 struct device *dev;
94 const struct vfio_device_ops *ops;
95 struct vfio_group *group;
96 struct list_head group_next;
97 void *device_data;
98};
99
100#ifdef CONFIG_VFIO_NOIOMMU
101static bool noiommu __read_mostly;
102module_param_named(enable_unsafe_noiommu_mode,
103 noiommu, bool, S_IRUGO | S_IWUSR);
104MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
105#endif
106
107/*
108 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
109 * and remove functions, any use cases other than acquiring the first
110 * reference for the purpose of calling vfio_add_group_dev() or removing
111 * that symmetric reference after vfio_del_group_dev() should use the raw
112 * iommu_group_{get,put} functions. In particular, vfio_iommu_group_put()
113 * removes the device from the dummy group and cannot be nested.
114 */
115struct iommu_group *vfio_iommu_group_get(struct device *dev)
116{
117 struct iommu_group *group;
118 int __maybe_unused ret;
119
120 group = iommu_group_get(dev);
121
122#ifdef CONFIG_VFIO_NOIOMMU
123 /*
124 * With noiommu enabled, an IOMMU group will be created for a device
125 * that doesn't already have one and doesn't have an iommu_ops on its
126 * bus. We use iommu_present() again in the main code to detect these
127 * fake groups.
128 */
129 if (group || !noiommu || iommu_present(dev->bus))
130 return group;
131
132 group = iommu_group_alloc();
133 if (IS_ERR(group))
134 return NULL;
135
136 iommu_group_set_name(group, "vfio-noiommu");
137 ret = iommu_group_add_device(group, dev);
138 iommu_group_put(group);
139 if (ret)
140 return NULL;
141
142 /*
143 * Where to taint? At this point we've added an IOMMU group for a
144 * device that is not backed by iommu_ops, therefore any iommu_
145 * callback using iommu_ops can legitimately Oops. So, while we may
146 * be about to give a DMA capable device to a user without IOMMU
147 * protection, which is clearly taint-worthy, let's go ahead and do
148 * it here.
149 */
150 add_taint(TAINT_USER, LOCKDEP_STILL_OK);
151 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
152#endif
153
154 return group;
155}
156EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
157
158void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
159{
160#ifdef CONFIG_VFIO_NOIOMMU
161 if (!iommu_present(dev->bus))
162 iommu_group_remove_device(dev);
163#endif
164
165 iommu_group_put(group);
166}
167EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
168
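/*
 * A minimal sketch, assuming a hypothetical bus driver with my_* names and
 * a my_device private structure, of how probe/remove paths are expected to
 * pair these helpers with vfio_add_group_dev()/vfio_del_group_dev():
 *
 *	static int my_vfio_probe(struct device *dev)
 *	{
 *		struct my_device *vdev;
 *		int ret;
 *
 *		if (!vfio_iommu_group_get(dev))
 *			return -EINVAL;
 *
 *		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 *		if (!vdev) {
 *			vfio_iommu_group_put(dev->iommu_group, dev);
 *			return -ENOMEM;
 *		}
 *
 *		ret = vfio_add_group_dev(dev, &my_vfio_dev_ops, vdev);
 *		if (ret) {
 *			vfio_iommu_group_put(dev->iommu_group, dev);
 *			kfree(vdev);
 *		}
 *		return ret;
 *	}
 *
 *	static void my_vfio_remove(struct device *dev)
 *	{
 *		struct my_device *vdev = vfio_del_group_dev(dev);
 *
 *		vfio_iommu_group_put(dev->iommu_group, dev);
 *		kfree(vdev);
 *	}
 */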
169#ifdef CONFIG_VFIO_NOIOMMU
170static void *vfio_noiommu_open(unsigned long arg)
171{
172 if (arg != VFIO_NOIOMMU_IOMMU)
173 return ERR_PTR(-EINVAL);
174 if (!capable(CAP_SYS_RAWIO))
175 return ERR_PTR(-EPERM);
176
177 return NULL;
178}
179
180static void vfio_noiommu_release(void *iommu_data)
181{
182}
183
184static long vfio_noiommu_ioctl(void *iommu_data,
185 unsigned int cmd, unsigned long arg)
186{
187 if (cmd == VFIO_CHECK_EXTENSION)
188 return arg == VFIO_NOIOMMU_IOMMU ? 1 : 0;
189
190 return -ENOTTY;
191}
192
193static int vfio_iommu_present(struct device *dev, void *unused)
194{
195 return iommu_present(dev->bus) ? 1 : 0;
196}
197
198static int vfio_noiommu_attach_group(void *iommu_data,
199 struct iommu_group *iommu_group)
200{
201 return iommu_group_for_each_dev(iommu_group, NULL,
202 vfio_iommu_present) ? -EINVAL : 0;
203}
204
205static void vfio_noiommu_detach_group(void *iommu_data,
206 struct iommu_group *iommu_group)
207{
208}
209
210static struct vfio_iommu_driver_ops vfio_noiommu_ops = {
211 .name = "vfio-noiommu",
212 .owner = THIS_MODULE,
213 .open = vfio_noiommu_open,
214 .release = vfio_noiommu_release,
215 .ioctl = vfio_noiommu_ioctl,
216 .attach_group = vfio_noiommu_attach_group,
217 .detach_group = vfio_noiommu_detach_group,
218};
219
220static struct vfio_iommu_driver vfio_noiommu_driver = {
221 .ops = &vfio_noiommu_ops,
222};
223
224/*
225 * Wrap IOMMU drivers, the noiommu driver is the one and only driver for
226 * noiommu groups (and thus containers) and not available for normal groups.
227 */
228#define vfio_for_each_iommu_driver(con, pos) \
229 for (pos = con->noiommu ? &vfio_noiommu_driver : \
230 list_first_entry(&vfio.iommu_drivers_list, \
231 struct vfio_iommu_driver, vfio_next); \
232 (con->noiommu ? pos != NULL : \
233 &pos->vfio_next != &vfio.iommu_drivers_list); \
234 pos = con->noiommu ? NULL : list_next_entry(pos, vfio_next))
235#else
236#define vfio_for_each_iommu_driver(con, pos) \
237 list_for_each_entry(pos, &vfio.iommu_drivers_list, vfio_next)
238#endif
239
240
241/**
242 * IOMMU driver registration
243 */
244int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
245{
246 struct vfio_iommu_driver *driver, *tmp;
247
248 driver = kzalloc(sizeof(*driver), GFP_KERNEL);
249 if (!driver)
250 return -ENOMEM;
251
252 driver->ops = ops;
253
254 mutex_lock(&vfio.iommu_drivers_lock);
255
256 /* Check for duplicates */
257 list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
258 if (tmp->ops == ops) {
259 mutex_unlock(&vfio.iommu_drivers_lock);
260 kfree(driver);
261 return -EINVAL;
262 }
263 }
264
265 list_add(&driver->vfio_next, &vfio.iommu_drivers_list);
266
267 mutex_unlock(&vfio.iommu_drivers_lock);
268
269 return 0;
270}
271EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);
272
273void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
274{
275 struct vfio_iommu_driver *driver;
276
277 mutex_lock(&vfio.iommu_drivers_lock);
278 list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
279 if (driver->ops == ops) {
280 list_del(&driver->vfio_next);
281 mutex_unlock(&vfio.iommu_drivers_lock);
282 kfree(driver);
283 return;
284 }
285 }
286 mutex_unlock(&vfio.iommu_drivers_lock);
287}
288EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
289
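/*
 * A minimal sketch, assuming a hypothetical backend with my_iommu_* ops, of
 * how an IOMMU driver such as vfio_iommu_type1 registers and unregisters
 * itself with the calls above:
 *
 *	static const struct vfio_iommu_driver_ops my_iommu_driver_ops = {
 *		.name		= "my-iommu",
 *		.owner		= THIS_MODULE,
 *		.open		= my_iommu_open,
 *		.release	= my_iommu_release,
 *		.ioctl		= my_iommu_ioctl,
 *		.attach_group	= my_iommu_attach_group,
 *		.detach_group	= my_iommu_detach_group,
 *	};
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return vfio_register_iommu_driver(&my_iommu_driver_ops);
 *	}
 *
 *	static void __exit my_iommu_exit(void)
 *	{
 *		vfio_unregister_iommu_driver(&my_iommu_driver_ops);
 *	}
 */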
290/**
291 * Group minor allocation/free - both called with vfio.group_lock held
292 */
293static int vfio_alloc_group_minor(struct vfio_group *group)
294{
295 return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
296}
297
298static void vfio_free_group_minor(int minor)
299{
300 idr_remove(&vfio.group_idr, minor);
301}
302
303static int vfio_iommu_group_notifier(struct notifier_block *nb,
304 unsigned long action, void *data);
305static void vfio_group_get(struct vfio_group *group);
306
307/**
308 * Container objects - containers are created when /dev/vfio/vfio is
309 * opened, but their lifecycle extends until the last user is done, so
310 * it's freed via kref. Must support container/group/device being
311 * closed in any order.
312 */
313static void vfio_container_get(struct vfio_container *container)
314{
315 kref_get(&container->kref);
316}
317
318static void vfio_container_release(struct kref *kref)
319{
320 struct vfio_container *container;
321 container = container_of(kref, struct vfio_container, kref);
322
323 kfree(container);
324}
325
326static void vfio_container_put(struct vfio_container *container)
327{
328 kref_put(&container->kref, vfio_container_release);
329}
330
331static void vfio_group_unlock_and_free(struct vfio_group *group)
332{
333 mutex_unlock(&vfio.group_lock);
334 /*
335 * Unregister outside of lock. A spurious callback is harmless now
336 * that the group is no longer in vfio.group_list.
337 */
338 iommu_group_unregister_notifier(group->iommu_group, &group->nb);
339 kfree(group);
340}
341
342/**
343 * Group objects - create, release, get, put, search
344 */
345static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
346 bool noiommu)
347{
348 struct vfio_group *group, *tmp;
349 struct device *dev;
350 int ret, minor;
351
352 group = kzalloc(sizeof(*group), GFP_KERNEL);
353 if (!group)
354 return ERR_PTR(-ENOMEM);
355
356 kref_init(&group->kref);
357 INIT_LIST_HEAD(&group->device_list);
358 mutex_init(&group->device_lock);
359 INIT_LIST_HEAD(&group->unbound_list);
360 mutex_init(&group->unbound_lock);
361 atomic_set(&group->container_users, 0);
362 atomic_set(&group->opened, 0);
363 group->iommu_group = iommu_group;
364 group->noiommu = noiommu;
365
366 group->nb.notifier_call = vfio_iommu_group_notifier;
367
368 /*
369 * blocking notifiers acquire a rwsem around registering and hold
370 * it around callback. Therefore, need to register outside of
371 * vfio.group_lock to avoid A-B/B-A contention. Our callback won't
372 * do anything unless it can find the group in vfio.group_list, so
373 * no harm in registering early.
374 */
375 ret = iommu_group_register_notifier(iommu_group, &group->nb);
376 if (ret) {
377 kfree(group);
378 return ERR_PTR(ret);
379 }
380
381 mutex_lock(&vfio.group_lock);
382
383 /* Did we race creating this group? */
384 list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
385 if (tmp->iommu_group == iommu_group) {
386 vfio_group_get(tmp);
387 vfio_group_unlock_and_free(group);
388 return tmp;
389 }
390 }
391
392 minor = vfio_alloc_group_minor(group);
393 if (minor < 0) {
394 vfio_group_unlock_and_free(group);
395 return ERR_PTR(minor);
396 }
397
398 dev = device_create(vfio.class, NULL,
399 MKDEV(MAJOR(vfio.group_devt), minor),
400 group, "%s%d", noiommu ? "noiommu-" : "",
401 iommu_group_id(iommu_group));
402 if (IS_ERR(dev)) {
403 vfio_free_group_minor(minor);
404 vfio_group_unlock_and_free(group);
405 return (struct vfio_group *)dev; /* ERR_PTR */
406 }
407
408 group->minor = minor;
409 group->dev = dev;
410
411 list_add(&group->vfio_next, &vfio.group_list);
412
413 mutex_unlock(&vfio.group_lock);
414
415 return group;
416}
417
418/* called with vfio.group_lock held */
419static void vfio_group_release(struct kref *kref)
420{
421 struct vfio_group *group = container_of(kref, struct vfio_group, kref);
422 struct vfio_unbound_dev *unbound, *tmp;
423 struct iommu_group *iommu_group = group->iommu_group;
424
425 WARN_ON(!list_empty(&group->device_list));
426
427 list_for_each_entry_safe(unbound, tmp,
428 &group->unbound_list, unbound_next) {
429 list_del(&unbound->unbound_next);
430 kfree(unbound);
431 }
432
433 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
434 list_del(&group->vfio_next);
435 vfio_free_group_minor(group->minor);
436 vfio_group_unlock_and_free(group);
437 iommu_group_put(iommu_group);
438}
439
440static void vfio_group_put(struct vfio_group *group)
441{
442 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
443}
444
445/* Assume group_lock or group reference is held */
446static void vfio_group_get(struct vfio_group *group)
447{
448 kref_get(&group->kref);
449}
450
451/*
452 * Not really a try as we will sleep for mutex, but we need to make
453 * sure the group pointer is valid under lock and get a reference.
454 */
455static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
456{
457 struct vfio_group *target = group;
458
459 mutex_lock(&vfio.group_lock);
460 list_for_each_entry(group, &vfio.group_list, vfio_next) {
461 if (group == target) {
462 vfio_group_get(group);
463 mutex_unlock(&vfio.group_lock);
464 return group;
465 }
466 }
467 mutex_unlock(&vfio.group_lock);
468
469 return NULL;
470}
471
472static
473struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
474{
475 struct vfio_group *group;
476
477 mutex_lock(&vfio.group_lock);
478 list_for_each_entry(group, &vfio.group_list, vfio_next) {
479 if (group->iommu_group == iommu_group) {
480 vfio_group_get(group);
481 mutex_unlock(&vfio.group_lock);
482 return group;
483 }
484 }
485 mutex_unlock(&vfio.group_lock);
486
487 return NULL;
488}
489
490static struct vfio_group *vfio_group_get_from_minor(int minor)
491{
492 struct vfio_group *group;
493
494 mutex_lock(&vfio.group_lock);
495 group = idr_find(&vfio.group_idr, minor);
496 if (!group) {
497 mutex_unlock(&vfio.group_lock);
498 return NULL;
499 }
500 vfio_group_get(group);
501 mutex_unlock(&vfio.group_lock);
502
503 return group;
504}
505
506/**
507 * Device objects - create, release, get, put, search
508 */
509static
510struct vfio_device *vfio_group_create_device(struct vfio_group *group,
511 struct device *dev,
512 const struct vfio_device_ops *ops,
513 void *device_data)
514{
515 struct vfio_device *device;
516
517 device = kzalloc(sizeof(*device), GFP_KERNEL);
518 if (!device)
519 return ERR_PTR(-ENOMEM);
520
521 kref_init(&device->kref);
522 device->dev = dev;
523 device->group = group;
524 device->ops = ops;
525 device->device_data = device_data;
526 dev_set_drvdata(dev, device);
527
528 /* No need to get group_lock, caller has group reference */
529 vfio_group_get(group);
530
531 mutex_lock(&group->device_lock);
532 list_add(&device->group_next, &group->device_list);
533 mutex_unlock(&group->device_lock);
534
535 return device;
536}
537
538static void vfio_device_release(struct kref *kref)
539{
540 struct vfio_device *device = container_of(kref,
541 struct vfio_device, kref);
542 struct vfio_group *group = device->group;
543
544 list_del(&device->group_next);
545 mutex_unlock(&group->device_lock);
546
547 dev_set_drvdata(device->dev, NULL);
548
549 kfree(device);
550
551 /* vfio_del_group_dev may be waiting for this device */
552 wake_up(&vfio.release_q);
553}
554
555/* Device reference always implies a group reference */
556void vfio_device_put(struct vfio_device *device)
557{
558 struct vfio_group *group = device->group;
559 kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
560 vfio_group_put(group);
561}
562EXPORT_SYMBOL_GPL(vfio_device_put);
563
564static void vfio_device_get(struct vfio_device *device)
565{
566 vfio_group_get(device->group);
567 kref_get(&device->kref);
568}
569
570static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
571 struct device *dev)
572{
573 struct vfio_device *device;
574
575 mutex_lock(&group->device_lock);
576 list_for_each_entry(device, &group->device_list, group_next) {
577 if (device->dev == dev) {
578 vfio_device_get(device);
579 mutex_unlock(&group->device_lock);
580 return device;
581 }
582 }
583 mutex_unlock(&group->device_lock);
584 return NULL;
585}
586
587/*
588 * Some drivers, like pci-stub, are only used to prevent other drivers from
589 * claiming a device and are therefore perfectly legitimate for a user owned
590 * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping
591 * of the device, but it does prevent the user from having direct access to
592 * the device, which is useful in some circumstances.
593 *
594 * We also assume that we can include PCI interconnect devices, ie. bridges.
595 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
596 * then all of the downstream devices will be part of the same IOMMU group as
597 * the bridge. Thus, if placing the bridge into the user owned IOVA space
598 * breaks anything, it only does so for user owned devices downstream. Note
599 * that error notification via MSI can be affected for platforms that handle
600 * MSI within the same IOVA space as DMA.
601 */
602static const char * const vfio_driver_whitelist[] = { "pci-stub" };
603
604static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
605{
606 int i;
607
608 if (dev_is_pci(dev)) {
609 struct pci_dev *pdev = to_pci_dev(dev);
610
611 if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
612 return true;
613 }
614
615 for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) {
616 if (!strcmp(drv->name, vfio_driver_whitelist[i]))
617 return true;
618 }
619
620 return false;
621}
622
623/*
624 * A vfio group is viable for use by userspace if all devices are in
625 * one of the following states:
626 * - driver-less
627 * - bound to a vfio driver
628 * - bound to a whitelisted driver
629 * - a PCI interconnect device
630 *
631 * We use two methods to determine whether a device is bound to a vfio
632 * driver. The first is to test whether the device exists in the vfio
633 * group. The second is to test if the device exists on the group
634 * unbound_list, indicating it's in the middle of transitioning from
635 * a vfio driver to driver-less.
636 */
637static int vfio_dev_viable(struct device *dev, void *data)
638{
639 struct vfio_group *group = data;
640 struct vfio_device *device;
641 struct device_driver *drv = ACCESS_ONCE(dev->driver);
642 struct vfio_unbound_dev *unbound;
643 int ret = -EINVAL;
644
645 mutex_lock(&group->unbound_lock);
646 list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
647 if (dev == unbound->dev) {
648 ret = 0;
649 break;
650 }
651 }
652 mutex_unlock(&group->unbound_lock);
653
654 if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
655 return 0;
656
657 device = vfio_group_get_device(group, dev);
658 if (device) {
659 vfio_device_put(device);
660 return 0;
661 }
662
663 return ret;
664}
665
666/**
667 * Async device support
668 */
669static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
670{
671 struct vfio_device *device;
672
673 /* Do we already know about it? We shouldn't */
674 device = vfio_group_get_device(group, dev);
675 if (WARN_ON_ONCE(device)) {
676 vfio_device_put(device);
677 return 0;
678 }
679
680 /* Nothing to do for idle groups */
681 if (!atomic_read(&group->container_users))
682 return 0;
683
684 /* TODO Prevent device auto probing */
685 WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
686 iommu_group_id(group->iommu_group));
687
688 return 0;
689}
690
691static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
692{
693 /* We don't care what happens when the group isn't in use */
694 if (!atomic_read(&group->container_users))
695 return 0;
696
697 return vfio_dev_viable(dev, group);
698}
699
700static int vfio_iommu_group_notifier(struct notifier_block *nb,
701 unsigned long action, void *data)
702{
703 struct vfio_group *group = container_of(nb, struct vfio_group, nb);
704 struct device *dev = data;
705 struct vfio_unbound_dev *unbound;
706
707 /*
708 * Need to go through a group_lock lookup to get a reference or we
709 * risk racing a group being removed. Ignore spurious notifies.
710 */
711 group = vfio_group_try_get(group);
712 if (!group)
713 return NOTIFY_OK;
714
715 switch (action) {
716 case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
717 vfio_group_nb_add_dev(group, dev);
718 break;
719 case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
720 /*
721 * Nothing to do here. If the device is in use, then the
722 * vfio sub-driver should block the remove callback until
723 * it is unused. If the device is unused or attached to a
724 * stub driver, then it should be released and we don't
725 * care that it will be going away.
726 */
727 break;
728 case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
729 pr_debug("%s: Device %s, group %d binding to driver\n",
730 __func__, dev_name(dev),
731 iommu_group_id(group->iommu_group));
732 break;
733 case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
734 pr_debug("%s: Device %s, group %d bound to driver %s\n",
735 __func__, dev_name(dev),
736 iommu_group_id(group->iommu_group), dev->driver->name);
737 BUG_ON(vfio_group_nb_verify(group, dev));
738 break;
739 case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
740 pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
741 __func__, dev_name(dev),
742 iommu_group_id(group->iommu_group), dev->driver->name);
743 break;
744 case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
745 pr_debug("%s: Device %s, group %d unbound from driver\n",
746 __func__, dev_name(dev),
747 iommu_group_id(group->iommu_group));
748 /*
749 * XXX An unbound device in a live group is ok, but we'd
750 * really like to avoid the above BUG_ON by preventing other
751 * drivers from binding to it. Once that occurs, we have to
752 * stop the system to maintain isolation. At a minimum, we'd
753 * want a toggle to disable driver auto probe for this device.
754 */
755
756 mutex_lock(&group->unbound_lock);
757 list_for_each_entry(unbound,
758 &group->unbound_list, unbound_next) {
759 if (dev == unbound->dev) {
760 list_del(&unbound->unbound_next);
761 kfree(unbound);
762 break;
763 }
764 }
765 mutex_unlock(&group->unbound_lock);
766 break;
767 }
768
769 vfio_group_put(group);
770 return NOTIFY_OK;
771}
772
773/**
774 * VFIO driver API
775 */
776int vfio_add_group_dev(struct device *dev,
777 const struct vfio_device_ops *ops, void *device_data)
778{
779 struct iommu_group *iommu_group;
780 struct vfio_group *group;
781 struct vfio_device *device;
782
783 iommu_group = iommu_group_get(dev);
784 if (!iommu_group)
785 return -EINVAL;
786
787 group = vfio_group_get_from_iommu(iommu_group);
788 if (!group) {
789 group = vfio_create_group(iommu_group,
790 !iommu_present(dev->bus));
791 if (IS_ERR(group)) {
792 iommu_group_put(iommu_group);
793 return PTR_ERR(group);
794 }
795 } else {
796 /*
797 * A found vfio_group already holds a reference to the
798 * iommu_group. A created vfio_group keeps the reference.
799 */
800 iommu_group_put(iommu_group);
801 }
802
803 device = vfio_group_get_device(group, dev);
804 if (device) {
805 WARN(1, "Device %s already exists on group %d\n",
806 dev_name(dev), iommu_group_id(iommu_group));
807 vfio_device_put(device);
808 vfio_group_put(group);
809 return -EBUSY;
810 }
811
812 device = vfio_group_create_device(group, dev, ops, device_data);
813 if (IS_ERR(device)) {
814 vfio_group_put(group);
815 return PTR_ERR(device);
816 }
817
818 /*
819 * Drop all but the vfio_device reference. The vfio_device holds
820 * a reference to the vfio_group, which holds a reference to the
821 * iommu_group.
822 */
823 vfio_group_put(group);
824
825 return 0;
826}
827EXPORT_SYMBOL_GPL(vfio_add_group_dev);
828
829/**
830 * Get a reference to the vfio_device for a device. Even if the
831 * caller thinks they own the device, they could be racing with a
832 * release call path, so we can't trust drvdata for the shortcut.
833 * Go the long way around, from the iommu_group to the vfio_group
834 * to the vfio_device.
835 */
836struct vfio_device *vfio_device_get_from_dev(struct device *dev)
837{
838 struct iommu_group *iommu_group;
839 struct vfio_group *group;
840 struct vfio_device *device;
841
842 iommu_group = iommu_group_get(dev);
843 if (!iommu_group)
844 return NULL;
845
846 group = vfio_group_get_from_iommu(iommu_group);
847 iommu_group_put(iommu_group);
848 if (!group)
849 return NULL;
850
851 device = vfio_group_get_device(group, dev);
852 vfio_group_put(group);
853
854 return device;
855}
856EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
857
858static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
859 char *buf)
860{
861 struct vfio_device *it, *device = NULL;
862
863 mutex_lock(&group->device_lock);
864 list_for_each_entry(it, &group->device_list, group_next) {
865 if (!strcmp(dev_name(it->dev), buf)) {
866 device = it;
867 vfio_device_get(device);
868 break;
869 }
870 }
871 mutex_unlock(&group->device_lock);
872
873 return device;
874}
875
876/*
877 * Caller must hold a reference to the vfio_device
878 */
879void *vfio_device_data(struct vfio_device *device)
880{
881 return device->device_data;
882}
883EXPORT_SYMBOL_GPL(vfio_device_data);
884
885/* Given a referenced group, check if it contains the device */
886static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
887{
888 struct vfio_device *device;
889
890 device = vfio_group_get_device(group, dev);
891 if (!device)
892 return false;
893
894 vfio_device_put(device);
895 return true;
896}
897
898/*
899 * Decrement the device reference count and wait for the device to be
900 * removed. Open file descriptors for the device... */
901void *vfio_del_group_dev(struct device *dev)
902{
903 struct vfio_device *device = dev_get_drvdata(dev);
904 struct vfio_group *group = device->group;
905 void *device_data = device->device_data;
906 struct vfio_unbound_dev *unbound;
907 unsigned int i = 0;
908 long ret;
909 bool interrupted = false;
910
911 /*
912 * The group exists so long as we have a device reference. Get
913 * a group reference and use it to scan for the device going away.
914 */
915 vfio_group_get(group);
916
917 /*
918 * When the device is removed from the group, the group suddenly
919 * becomes non-viable; the device has a driver (until the unbind
920 * completes), but it's not present in the group. This is bad news
921 * for any external users that need to re-acquire a group reference
922 * in order to match and release their existing reference. To
923 * solve this, we track such devices on the unbound_list to bridge
924 * the gap until they're fully unbound.
925 */
926 unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
927 if (unbound) {
928 unbound->dev = dev;
929 mutex_lock(&group->unbound_lock);
930 list_add(&unbound->unbound_next, &group->unbound_list);
931 mutex_unlock(&group->unbound_lock);
932 }
933 WARN_ON(!unbound);
934
935 vfio_device_put(device);
936
937 /*
938 * If the device is still present in the group after the above
939 * 'put', then it is in use and we need to request it from the
940 * bus driver. The driver may in turn need to request the
941 * device from the user. We send the request on an arbitrary
942 * interval with counter to allow the driver to take escalating
943 * measures to release the device if it has the ability to do so.
944 */
945 do {
946 device = vfio_group_get_device(group, dev);
947 if (!device)
948 break;
949
950 if (device->ops->request)
951 device->ops->request(device_data, i++);
952
953 vfio_device_put(device);
954
955 if (interrupted) {
956 ret = wait_event_timeout(vfio.release_q,
957 !vfio_dev_present(group, dev), HZ * 10);
958 } else {
959 ret = wait_event_interruptible_timeout(vfio.release_q,
960 !vfio_dev_present(group, dev), HZ * 10);
961 if (ret == -ERESTARTSYS) {
962 interrupted = true;
963 dev_warn(dev,
964 "Device is currently in use, task"
965 " \"%s\" (%d) "
966 "blocked until device is released",
967 current->comm, task_pid_nr(current));
968 }
969 }
970 } while (ret <= 0);
971
972 vfio_group_put(group);
973
974 return device_data;
975}
976EXPORT_SYMBOL_GPL(vfio_del_group_dev);
977
978/**
979 * VFIO base fd, /dev/vfio/vfio
980 */
981static long vfio_ioctl_check_extension(struct vfio_container *container,
982 unsigned long arg)
983{
984 struct vfio_iommu_driver *driver;
985 long ret = 0;
986
987 down_read(&container->group_lock);
988
989 driver = container->iommu_driver;
990
991 switch (arg) {
992 /* No base extensions yet */
993 default:
994 /*
995 * If no driver is set, poll all registered drivers for
996 * extensions and return the first positive result. If
997 * a driver is already set, further queries will be passed
998 * only to that driver.
999 */
1000 if (!driver) {
1001 mutex_lock(&vfio.iommu_drivers_lock);
1002 vfio_for_each_iommu_driver(container, driver) {
1003 if (!try_module_get(driver->ops->owner))
1004 continue;
1005
1006 ret = driver->ops->ioctl(NULL,
1007 VFIO_CHECK_EXTENSION,
1008 arg);
1009 module_put(driver->ops->owner);
1010 if (ret > 0)
1011 break;
1012 }
1013 mutex_unlock(&vfio.iommu_drivers_lock);
1014 } else
1015 ret = driver->ops->ioctl(container->iommu_data,
1016 VFIO_CHECK_EXTENSION, arg);
1017 }
1018
1019 up_read(&container->group_lock);
1020
1021 return ret;
1022}
1023
1024/* hold write lock on container->group_lock */
1025static int __vfio_container_attach_groups(struct vfio_container *container,
1026 struct vfio_iommu_driver *driver,
1027 void *data)
1028{
1029 struct vfio_group *group;
1030 int ret = -ENODEV;
1031
1032 list_for_each_entry(group, &container->group_list, container_next) {
1033 ret = driver->ops->attach_group(data, group->iommu_group);
1034 if (ret)
1035 goto unwind;
1036 }
1037
1038 return ret;
1039
1040unwind:
1041 list_for_each_entry_continue_reverse(group, &container->group_list,
1042 container_next) {
1043 driver->ops->detach_group(data, group->iommu_group);
1044 }
1045
1046 return ret;
1047}
1048
1049static long vfio_ioctl_set_iommu(struct vfio_container *container,
1050 unsigned long arg)
1051{
1052 struct vfio_iommu_driver *driver;
1053 long ret = -ENODEV;
1054
1055 down_write(&container->group_lock);
1056
1057 /*
1058 * The container is designed to be an unprivileged interface while
1059 * the group can be assigned to specific users. Therefore, only by
1060 * adding a group to a container does the user get the privilege of
1061 * enabling the iommu, which may allocate finite resources. There
1062 * is no unset_iommu, but by removing all the groups from a container,
1063 * the container is deprivileged and returns to an unset state.
1064 */
1065 if (list_empty(&container->group_list) || container->iommu_driver) {
1066 up_write(&container->group_lock);
1067 return -EINVAL;
1068 }
1069
1070 mutex_lock(&vfio.iommu_drivers_lock);
1071 vfio_for_each_iommu_driver(container, driver) {
1072 void *data;
1073
1074 if (!try_module_get(driver->ops->owner))
1075 continue;
1076
1077 /*
1078 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
1079 * so test which iommu driver reported support for this
1080 * extension and call open on them. We also pass them the
1081 * magic, allowing a single driver to support multiple
1082 * interfaces if they'd like.
1083 */
1084 if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
1085 module_put(driver->ops->owner);
1086 continue;
1087 }
1088
1089 /* module reference holds the driver we're working on */
1090 mutex_unlock(&vfio.iommu_drivers_lock);
1091
1092 data = driver->ops->open(arg);
1093 if (IS_ERR(data)) {
1094 ret = PTR_ERR(data);
1095 module_put(driver->ops->owner);
1096 goto skip_drivers_unlock;
1097 }
1098
1099 ret = __vfio_container_attach_groups(container, driver, data);
1100 if (!ret) {
1101 container->iommu_driver = driver;
1102 container->iommu_data = data;
1103 } else {
1104 driver->ops->release(data);
1105 module_put(driver->ops->owner);
1106 }
1107
1108 goto skip_drivers_unlock;
1109 }
1110
1111 mutex_unlock(&vfio.iommu_drivers_lock);
1112skip_drivers_unlock:
1113 up_write(&container->group_lock);
1114
1115 return ret;
1116}
1117
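/*
 * A minimal userspace sketch (container_fd is hypothetical): the same
 * extension value doubles as the VFIO_SET_IOMMU argument, so a caller can
 * probe for a backend and then select it on the container fd:
 *
 *	if (ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) > 0)
 *		ioctl(container_fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 */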
1118static long vfio_fops_unl_ioctl(struct file *filep,
1119 unsigned int cmd, unsigned long arg)
1120{
1121 struct vfio_container *container = filep->private_data;
1122 struct vfio_iommu_driver *driver;
1123 void *data;
1124 long ret = -EINVAL;
1125
1126 if (!container)
1127 return ret;
1128
1129 switch (cmd) {
1130 case VFIO_GET_API_VERSION:
1131 ret = VFIO_API_VERSION;
1132 break;
1133 case VFIO_CHECK_EXTENSION:
1134 ret = vfio_ioctl_check_extension(container, arg);
1135 break;
1136 case VFIO_SET_IOMMU:
1137 ret = vfio_ioctl_set_iommu(container, arg);
1138 break;
1139 default:
1140 down_read(&container->group_lock);
1141
1142 driver = container->iommu_driver;
1143 data = container->iommu_data;
1144
1145 if (driver) /* passthrough all unrecognized ioctls */
1146 ret = driver->ops->ioctl(data, cmd, arg);
1147
1148 up_read(&container->group_lock);
1149 }
1150
1151 return ret;
1152}
1153
1154#ifdef CONFIG_COMPAT
1155static long vfio_fops_compat_ioctl(struct file *filep,
1156 unsigned int cmd, unsigned long arg)
1157{
1158 arg = (unsigned long)compat_ptr(arg);
1159 return vfio_fops_unl_ioctl(filep, cmd, arg);
1160}
1161#endif /* CONFIG_COMPAT */
1162
1163static int vfio_fops_open(struct inode *inode, struct file *filep)
1164{
1165 struct vfio_container *container;
1166
1167 container = kzalloc(sizeof(*container), GFP_KERNEL);
1168 if (!container)
1169 return -ENOMEM;
1170
1171 INIT_LIST_HEAD(&container->group_list);
1172 init_rwsem(&container->group_lock);
1173 kref_init(&container->kref);
1174
1175 filep->private_data = container;
1176
1177 return 0;
1178}
1179
1180static int vfio_fops_release(struct inode *inode, struct file *filep)
1181{
1182 struct vfio_container *container = filep->private_data;
1183
1184 filep->private_data = NULL;
1185
1186 vfio_container_put(container);
1187
1188 return 0;
1189}
1190
1191/*
1192 * Once an iommu driver is set, we optionally pass read/write/mmap
1193 * on to the driver, allowing management interfaces beyond ioctl.
1194 */
1195static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
1196 size_t count, loff_t *ppos)
1197{
1198 struct vfio_container *container = filep->private_data;
1199 struct vfio_iommu_driver *driver;
1200 ssize_t ret = -EINVAL;
1201
1202 down_read(&container->group_lock);
1203
1204 driver = container->iommu_driver;
1205 if (likely(driver && driver->ops->read))
1206 ret = driver->ops->read(container->iommu_data,
1207 buf, count, ppos);
1208
1209 up_read(&container->group_lock);
1210
1211 return ret;
1212}
1213
1214static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
1215 size_t count, loff_t *ppos)
1216{
1217 struct vfio_container *container = filep->private_data;
1218 struct vfio_iommu_driver *driver;
1219 ssize_t ret = -EINVAL;
1220
1221 down_read(&container->group_lock);
1222
1223 driver = container->iommu_driver;
1224 if (likely(driver && driver->ops->write))
1225 ret = driver->ops->write(container->iommu_data,
1226 buf, count, ppos);
1227
1228 up_read(&container->group_lock);
1229
1230 return ret;
1231}
1232
1233static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
1234{
1235 struct vfio_container *container = filep->private_data;
1236 struct vfio_iommu_driver *driver;
1237 int ret = -EINVAL;
1238
1239 down_read(&container->group_lock);
1240
1241 driver = container->iommu_driver;
1242 if (likely(driver && driver->ops->mmap))
1243 ret = driver->ops->mmap(container->iommu_data, vma);
1244
1245 up_read(&container->group_lock);
1246
1247 return ret;
1248}
1249
1250static const struct file_operations vfio_fops = {
1251 .owner = THIS_MODULE,
1252 .open = vfio_fops_open,
1253 .release = vfio_fops_release,
1254 .read = vfio_fops_read,
1255 .write = vfio_fops_write,
1256 .unlocked_ioctl = vfio_fops_unl_ioctl,
1257#ifdef CONFIG_COMPAT
1258 .compat_ioctl = vfio_fops_compat_ioctl,
1259#endif
1260 .mmap = vfio_fops_mmap,
1261};
1262
1263/**
1264 * VFIO Group fd, /dev/vfio/$GROUP
1265 */
1266static void __vfio_group_unset_container(struct vfio_group *group)
1267{
1268 struct vfio_container *container = group->container;
1269 struct vfio_iommu_driver *driver;
1270
1271 down_write(&container->group_lock);
1272
1273 driver = container->iommu_driver;
1274 if (driver)
1275 driver->ops->detach_group(container->iommu_data,
1276 group->iommu_group);
1277
1278 group->container = NULL;
1279 list_del(&group->container_next);
1280
1281 /* Detaching the last group deprivileges a container, remove iommu */
1282 if (driver && list_empty(&container->group_list)) {
1283 driver->ops->release(container->iommu_data);
1284 module_put(driver->ops->owner);
1285 container->iommu_driver = NULL;
1286 container->iommu_data = NULL;
1287 }
1288
1289 up_write(&container->group_lock);
1290
1291 vfio_container_put(container);
1292}
1293
1294/*
1295 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
1296 * if there was no container to unset. Since the ioctl is called on
1297 * the group, we know that still exists, therefore the only valid
1298 * transition here is 1->0.
1299 */
1300static int vfio_group_unset_container(struct vfio_group *group)
1301{
1302 int users = atomic_cmpxchg(&group->container_users, 1, 0);
1303
1304 if (!users)
1305 return -EINVAL;
1306 if (users != 1)
1307 return -EBUSY;
1308
1309 __vfio_group_unset_container(group);
1310
1311 return 0;
1312}
1313
1314/*
1315 * When removing container users, anything that removes the last user
1316 * implicitly removes the group from the container. That is, if the
1317 * group file descriptor is closed, as well as any device file descriptors,
1318 * the group is free.
1319 */
1320static void vfio_group_try_dissolve_container(struct vfio_group *group)
1321{
1322 if (0 == atomic_dec_if_positive(&group->container_users))
1323 __vfio_group_unset_container(group);
1324}
1325
1326static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1327{
1328 struct fd f;
1329 struct vfio_container *container;
1330 struct vfio_iommu_driver *driver;
1331 int ret = 0;
1332
1333 if (atomic_read(&group->container_users))
1334 return -EINVAL;
1335
1336 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1337 return -EPERM;
1338
1339 f = fdget(container_fd);
1340 if (!f.file)
1341 return -EBADF;
1342
1343 /* Sanity check, is this really our fd? */
1344 if (f.file->f_op != &vfio_fops) {
1345 fdput(f);
1346 return -EINVAL;
1347 }
1348
1349 container = f.file->private_data;
1350 WARN_ON(!container); /* fget ensures we don't race vfio_release */
1351
1352 down_write(&container->group_lock);
1353
1354 /* Real groups and fake groups cannot mix */
1355 if (!list_empty(&container->group_list) &&
1356 container->noiommu != group->noiommu) {
1357 ret = -EPERM;
1358 goto unlock_out;
1359 }
1360
1361 driver = container->iommu_driver;
1362 if (driver) {
1363 ret = driver->ops->attach_group(container->iommu_data,
1364 group->iommu_group);
1365 if (ret)
1366 goto unlock_out;
1367 }
1368
1369 group->container = container;
1370 container->noiommu = group->noiommu;
1371 list_add(&group->container_next, &container->group_list);
1372
1373 /* Get a reference on the container and mark a user within the group */
1374 vfio_container_get(container);
1375 atomic_inc(&group->container_users);
1376
1377unlock_out:
1378 up_write(&container->group_lock);
1379 fdput(f);
1380 return ret;
1381}
1382
1383static bool vfio_group_viable(struct vfio_group *group)
1384{
1385 return (iommu_group_for_each_dev(group->iommu_group,
1386 group, vfio_dev_viable) == 0);
1387}
1388
1389static const struct file_operations vfio_device_fops;
1390
1391static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1392{
1393 struct vfio_device *device;
1394 struct file *filep;
1395 int ret;
1396
1397 if (0 == atomic_read(&group->container_users) ||
1398 !group->container->iommu_driver || !vfio_group_viable(group))
1399 return -EINVAL;
1400
1401 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1402 return -EPERM;
1403
1404 device = vfio_device_get_from_name(group, buf);
1405 if (!device)
1406 return -ENODEV;
1407
1408 ret = device->ops->open(device->device_data);
1409 if (ret) {
1410 vfio_device_put(device);
1411 return ret;
1412 }
1413
1414 /*
1415 * We can't use anon_inode_getfd() because we need to modify
1416 * the f_mode flags directly to allow more than just ioctls
1417 */
1418 ret = get_unused_fd_flags(O_CLOEXEC);
1419 if (ret < 0) {
1420 device->ops->release(device->device_data);
1421 vfio_device_put(device);
1422 return ret;
1423 }
1424
1425 filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
1426 device, O_RDWR);
1427 if (IS_ERR(filep)) {
1428 put_unused_fd(ret);
1429 ret = PTR_ERR(filep);
1430 device->ops->release(device->device_data);
1431 vfio_device_put(device);
1432 return ret;
1433 }
1434
1435 /*
1436 * TODO: add an anon_inode interface to do this.
1437 * Appears to be missing by lack of need rather than
1438 * explicitly prevented. Now there's need.
1439 */
1440 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
1441
1442 atomic_inc(&group->container_users);
1443
1444 fd_install(ret, filep);
1445
1446 if (group->noiommu)
1447 dev_warn(device->dev, "vfio-noiommu device opened by user "
1448 "(%s:%d)\n", current->comm, task_pid_nr(current));
1449
cba3345c
AW
1450 return ret;
1451}
1452
1453static long vfio_group_fops_unl_ioctl(struct file *filep,
1454 unsigned int cmd, unsigned long arg)
1455{
1456 struct vfio_group *group = filep->private_data;
1457 long ret = -ENOTTY;
1458
1459 switch (cmd) {
1460 case VFIO_GROUP_GET_STATUS:
1461 {
1462 struct vfio_group_status status;
1463 unsigned long minsz;
1464
1465 minsz = offsetofend(struct vfio_group_status, flags);
1466
1467 if (copy_from_user(&status, (void __user *)arg, minsz))
1468 return -EFAULT;
1469
1470 if (status.argsz < minsz)
1471 return -EINVAL;
1472
1473 status.flags = 0;
1474
1475 if (vfio_group_viable(group))
1476 status.flags |= VFIO_GROUP_FLAGS_VIABLE;
1477
1478 if (group->container)
1479 status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;
1480
1481 if (copy_to_user((void __user *)arg, &status, minsz))
1482 return -EFAULT;
1483
1484 ret = 0;
1485 break;
1486 }
1487 case VFIO_GROUP_SET_CONTAINER:
1488 {
1489 int fd;
1490
1491 if (get_user(fd, (int __user *)arg))
1492 return -EFAULT;
1493
1494 if (fd < 0)
1495 return -EINVAL;
1496
1497 ret = vfio_group_set_container(group, fd);
1498 break;
1499 }
1500 case VFIO_GROUP_UNSET_CONTAINER:
1501 ret = vfio_group_unset_container(group);
1502 break;
1503 case VFIO_GROUP_GET_DEVICE_FD:
1504 {
1505 char *buf;
1506
1507 buf = strndup_user((const char __user *)arg, PAGE_SIZE);
1508 if (IS_ERR(buf))
1509 return PTR_ERR(buf);
1510
1511 ret = vfio_group_get_device_fd(group, buf);
1512 kfree(buf);
1513 break;
1514 }
1515 }
1516
1517 return ret;
1518}
1519
1520#ifdef CONFIG_COMPAT
1521static long vfio_group_fops_compat_ioctl(struct file *filep,
1522 unsigned int cmd, unsigned long arg)
1523{
1524 arg = (unsigned long)compat_ptr(arg);
1525 return vfio_group_fops_unl_ioctl(filep, cmd, arg);
1526}
1527#endif /* CONFIG_COMPAT */
1528
1529static int vfio_group_fops_open(struct inode *inode, struct file *filep)
1530{
1531 struct vfio_group *group;
1532 int opened;
1533
1534 group = vfio_group_get_from_minor(iminor(inode));
1535 if (!group)
1536 return -ENODEV;
1537
1538 if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
1539 vfio_group_put(group);
1540 return -EPERM;
1541 }
1542
1543 /* Do we need multiple instances of the group open? Seems not. */
1544 opened = atomic_cmpxchg(&group->opened, 0, 1);
1545 if (opened) {
1546 vfio_group_put(group);
1547 return -EBUSY;
1548 }
1549
1550 /* Is something still in use from a previous open? */
1551 if (group->container) {
1552 atomic_dec(&group->opened);
1553 vfio_group_put(group);
1554 return -EBUSY;
1555 }
1556
1557 filep->private_data = group;
1558
1559 return 0;
1560}
1561
1562static int vfio_group_fops_release(struct inode *inode, struct file *filep)
1563{
1564 struct vfio_group *group = filep->private_data;
1565
1566 filep->private_data = NULL;
1567
1568 vfio_group_try_dissolve_container(group);
1569
1570 atomic_dec(&group->opened);
1571
1572 vfio_group_put(group);
1573
1574 return 0;
1575}
1576
1577static const struct file_operations vfio_group_fops = {
1578 .owner = THIS_MODULE,
1579 .unlocked_ioctl = vfio_group_fops_unl_ioctl,
1580#ifdef CONFIG_COMPAT
1581 .compat_ioctl = vfio_group_fops_compat_ioctl,
1582#endif
1583 .open = vfio_group_fops_open,
1584 .release = vfio_group_fops_release,
1585};
1586
1587/**
1588 * VFIO Device fd
1589 */
1590static int vfio_device_fops_release(struct inode *inode, struct file *filep)
1591{
1592 struct vfio_device *device = filep->private_data;
1593
1594 device->ops->release(device->device_data);
1595
1596 vfio_group_try_dissolve_container(device->group);
1597
1598 vfio_device_put(device);
1599
1600 return 0;
1601}
1602
1603static long vfio_device_fops_unl_ioctl(struct file *filep,
1604 unsigned int cmd, unsigned long arg)
1605{
1606 struct vfio_device *device = filep->private_data;
1607
1608 if (unlikely(!device->ops->ioctl))
1609 return -EINVAL;
1610
1611 return device->ops->ioctl(device->device_data, cmd, arg);
1612}
1613
1614static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
1615 size_t count, loff_t *ppos)
1616{
1617 struct vfio_device *device = filep->private_data;
1618
1619 if (unlikely(!device->ops->read))
1620 return -EINVAL;
1621
1622 return device->ops->read(device->device_data, buf, count, ppos);
1623}
1624
1625static ssize_t vfio_device_fops_write(struct file *filep,
1626 const char __user *buf,
1627 size_t count, loff_t *ppos)
1628{
1629 struct vfio_device *device = filep->private_data;
1630
1631 if (unlikely(!device->ops->write))
1632 return -EINVAL;
1633
1634 return device->ops->write(device->device_data, buf, count, ppos);
1635}
1636
1637static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
1638{
1639 struct vfio_device *device = filep->private_data;
1640
1641 if (unlikely(!device->ops->mmap))
1642 return -EINVAL;
1643
1644 return device->ops->mmap(device->device_data, vma);
1645}
1646
1647#ifdef CONFIG_COMPAT
1648static long vfio_device_fops_compat_ioctl(struct file *filep,
1649 unsigned int cmd, unsigned long arg)
1650{
1651 arg = (unsigned long)compat_ptr(arg);
1652 return vfio_device_fops_unl_ioctl(filep, cmd, arg);
1653}
1654#endif /* CONFIG_COMPAT */
1655
1656static const struct file_operations vfio_device_fops = {
1657 .owner = THIS_MODULE,
1658 .release = vfio_device_fops_release,
1659 .read = vfio_device_fops_read,
1660 .write = vfio_device_fops_write,
1661 .unlocked_ioctl = vfio_device_fops_unl_ioctl,
1662#ifdef CONFIG_COMPAT
1663 .compat_ioctl = vfio_device_fops_compat_ioctl,
1664#endif
1665 .mmap = vfio_device_fops_mmap,
1666};
1667
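/*
 * A minimal userspace sketch of the sequence the container, group, and
 * device file descriptors above are designed for, loosely following
 * Documentation/vfio.txt; the group number and device name are examples:
 *
 *	int container, group, device;
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *	group = open("/dev/vfio/26", O_RDWR);
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		return -1;
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */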
1668/**
1669 * External user API, exported by symbols to be linked dynamically.
1670 *
1671 * The protocol includes:
1672 * 1. do normal VFIO init operation:
1673 * - opening a new container;
1674 * - attaching group(s) to it;
1675 * - setting an IOMMU driver for a container.
1676 * When IOMMU is set for a container, all groups in it are
1677 * considered ready to use by an external user.
1678 *
1679 * 2. User space passes a group fd to an external user.
1680 * The external user calls vfio_group_get_external_user()
1681 * to verify that:
1682 * - the group is initialized;
1683 * - IOMMU is set for it.
1684 * If both checks passed, vfio_group_get_external_user()
1685 * increments the container user counter to prevent
1686 * the VFIO group from disposal before KVM exits.
1687 *
1688 * 3. The external user calls vfio_external_user_iommu_id()
1689 * to know an IOMMU ID.
1690 *
1691 * 4. When the external KVM finishes, it calls
1692 * vfio_group_put_external_user() to release the VFIO group.
1693 * This call decrements the container user counter.
1694 */
1695struct vfio_group *vfio_group_get_external_user(struct file *filep)
1696{
1697 struct vfio_group *group = filep->private_data;
1698
1699 if (filep->f_op != &vfio_group_fops)
1700 return ERR_PTR(-EINVAL);
1701
1702 if (!atomic_inc_not_zero(&group->container_users))
1703 return ERR_PTR(-EINVAL);
1704
1705 if (group->noiommu) {
1706 atomic_dec(&group->container_users);
1707 return ERR_PTR(-EPERM);
1708 }
1709
1710 if (!group->container->iommu_driver ||
1711 !vfio_group_viable(group)) {
1712 atomic_dec(&group->container_users);
1713 return ERR_PTR(-EINVAL);
1714 }
1715
1716 vfio_group_get(group);
1717
1718 return group;
1719}
1720EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
1721
1722void vfio_group_put_external_user(struct vfio_group *group)
1723{
1724 vfio_group_put(group);
1725 vfio_group_try_dissolve_container(group);
1726}
1727EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
1728
1729int vfio_external_user_iommu_id(struct vfio_group *group)
1730{
1731 return iommu_group_id(group->iommu_group);
1732}
1733EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
1734
1735long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
1736{
1737 return vfio_ioctl_check_extension(group->container, arg);
1738}
1739EXPORT_SYMBOL_GPL(vfio_external_check_extension);
1740
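/*
 * A minimal sketch, assuming a group_fd received from userspace, of how an
 * external user such as the kvm-vfio device is expected to follow the
 * protocol above:
 *
 *	struct fd f = fdget(group_fd);
 *	struct vfio_group *group;
 *	int iommu_id;
 *
 *	group = vfio_group_get_external_user(f.file);
 *	fdput(f);
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *
 *	iommu_id = vfio_external_user_iommu_id(group);
 *
 *	vfio_group_put_external_user(group);
 */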
1741/**
1742 * Module/class support
1743 */
1744static char *vfio_devnode(struct device *dev, umode_t *mode)
1745{
1746 return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
1747}
1748
1749static struct miscdevice vfio_dev = {
1750 .minor = VFIO_MINOR,
1751 .name = "vfio",
1752 .fops = &vfio_fops,
1753 .nodename = "vfio/vfio",
1754 .mode = S_IRUGO | S_IWUGO,
1755};
1756
1757static int __init vfio_init(void)
1758{
1759 int ret;
1760
1761 idr_init(&vfio.group_idr);
1762 mutex_init(&vfio.group_lock);
1763 mutex_init(&vfio.iommu_drivers_lock);
1764 INIT_LIST_HEAD(&vfio.group_list);
1765 INIT_LIST_HEAD(&vfio.iommu_drivers_list);
1766 init_waitqueue_head(&vfio.release_q);
1767
1768 ret = misc_register(&vfio_dev);
1769 if (ret) {
1770 pr_err("vfio: misc device register failed\n");
1771 return ret;
1772 }
1773
1774 /* /dev/vfio/$GROUP */
1775 vfio.class = class_create(THIS_MODULE, "vfio");
1776 if (IS_ERR(vfio.class)) {
1777 ret = PTR_ERR(vfio.class);
1778 goto err_class;
1779 }
1780
1781 vfio.class->devnode = vfio_devnode;
1782
1783 ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
1784 if (ret)
1785 goto err_alloc_chrdev;
1786
1787 cdev_init(&vfio.group_cdev, &vfio_group_fops);
1788 ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
1789 if (ret)
1790 goto err_cdev_add;
1791
1792 pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
1793
1794 /*
1795 * Attempt to load known iommu-drivers. This gives us a working
1796 * environment without the user needing to explicitly load iommu
1797 * drivers.
1798 */
1799 request_module_nowait("vfio_iommu_type1");
1800 request_module_nowait("vfio_iommu_spapr_tce");
1801
1802 return 0;
1803
1804err_cdev_add:
1805 unregister_chrdev_region(vfio.group_devt, MINORMASK);
1806err_alloc_chrdev:
1807 class_destroy(vfio.class);
1808 vfio.class = NULL;
1809err_class:
1810 misc_deregister(&vfio_dev);
1811 return ret;
1812}
1813
1814static void __exit vfio_cleanup(void)
1815{
1816 WARN_ON(!list_empty(&vfio.group_list));
1817
1818 idr_destroy(&vfio.group_idr);
1819 cdev_del(&vfio.group_cdev);
1820 unregister_chrdev_region(vfio.group_devt, MINORMASK);
1821 class_destroy(vfio.class);
1822 vfio.class = NULL;
1823 misc_deregister(&vfio_dev);
1824}
1825
1826module_init(vfio_init);
1827module_exit(vfio_cleanup);
1828
1829MODULE_VERSION(DRIVER_VERSION);
1830MODULE_LICENSE("GPL v2");
1831MODULE_AUTHOR(DRIVER_AUTHOR);
1832MODULE_DESCRIPTION(DRIVER_DESC);
1833MODULE_ALIAS_MISCDEV(VFIO_MINOR);
1834MODULE_ALIAS("devname:vfio/vfio");