// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/vfio.h>
#include <linux/iommufd.h>
#include <linux/anon_inodes.h>
#include "vfio.h"

static struct vfio {
        struct class            *class;
        struct list_head        group_list;
        struct mutex            group_lock; /* locks group_list */
        struct ida              group_ida;
        dev_t                   group_devt;
} vfio;

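/*
 * Find a device in the group whose name matches @buf.  Drivers that
 * provide a ->match() callback get to decide, otherwise a plain
 * dev_name() comparison is used.  On success a registration reference
 * is held on the returned device.
 */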
static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
                                                     char *buf)
{
        struct vfio_device *it, *device = ERR_PTR(-ENODEV);

        mutex_lock(&group->device_lock);
        list_for_each_entry(it, &group->device_list, group_next) {
                int ret;

                if (it->ops->match) {
                        ret = it->ops->match(it, buf);
                        if (ret < 0) {
                                device = ERR_PTR(ret);
                                break;
                        }
                } else {
                        ret = !strcmp(dev_name(it->dev), buf);
                }

                if (ret && vfio_device_try_get_registration(it)) {
                        device = it;
                        break;
                }
        }
        mutex_unlock(&group->device_lock);

        return device;
}

/*
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static bool vfio_group_has_iommu(struct vfio_group *group)
{
        lockdep_assert_held(&group->group_lock);
        /*
         * There can only be users if there is a container, and if there is a
         * container there must be users.
         */
        WARN_ON(!group->container != !group->container_users);

        return group->container || group->iommufd;
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know the group still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_ioctl_unset_container(struct vfio_group *group)
{
        int ret = 0;

        mutex_lock(&group->group_lock);
        if (!vfio_group_has_iommu(group)) {
                ret = -EINVAL;
                goto out_unlock;
        }
        if (group->container) {
                if (group->container_users != 1) {
                        ret = -EBUSY;
                        goto out_unlock;
                }
                vfio_group_detach_container(group);
        }
        if (group->iommufd) {
                iommufd_ctx_put(group->iommufd);
                group->iommufd = NULL;
        }

out_unlock:
        mutex_unlock(&group->group_lock);
        return ret;
}

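/*
 * VFIO_GROUP_SET_CONTAINER accepts either a container fd or an iommufd
 * and attaches the group to it.  Only one IOMMU backend can be attached
 * at a time, and only while the group still has an iommu_group.
 */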
static int vfio_group_ioctl_set_container(struct vfio_group *group,
                                          int __user *arg)
{
        struct vfio_container *container;
        struct iommufd_ctx *iommufd;
        struct fd f;
        int ret;
        int fd;

        if (get_user(fd, arg))
                return -EFAULT;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;

        mutex_lock(&group->group_lock);
        if (vfio_group_has_iommu(group)) {
                ret = -EINVAL;
                goto out_unlock;
        }
        if (!group->iommu_group) {
                ret = -ENODEV;
                goto out_unlock;
        }

        container = vfio_container_from_file(f.file);
        if (container) {
                ret = vfio_container_attach_group(container, group);
                goto out_unlock;
        }

        iommufd = iommufd_ctx_from_file(f.file);
        if (!IS_ERR(iommufd)) {
                if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
                    group->type == VFIO_NO_IOMMU)
                        ret = iommufd_vfio_compat_set_no_iommu(iommufd);
                else
                        ret = iommufd_vfio_compat_ioas_create(iommufd);

                if (ret) {
                        iommufd_ctx_put(iommufd);
                        goto out_unlock;
                }

                group->iommufd = iommufd;
                goto out_unlock;
        }

        /* The FD passed is not recognized. */
        ret = -EBADFD;

out_unlock:
        mutex_unlock(&group->group_lock);
        fdput(f);
        return ret;
}

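/*
 * Snapshot the group's KVM pointer under kvm_ref_lock and, if one is
 * set, have the device take its own reference on it.
 */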
static void vfio_device_group_get_kvm_safe(struct vfio_device *device)
{
        spin_lock(&device->group->kvm_ref_lock);
        if (!device->group->kvm)
                goto unlock;

        _vfio_device_get_kvm_safe(device, device->group->kvm);

unlock:
        spin_unlock(&device->group->kvm_ref_lock);
}

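/*
 * Open a device via the group path.  Requires an IOMMU backend to be
 * attached to the group and manages the device's KVM reference around
 * the first-open / last-close transitions.
 */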
static int vfio_device_group_open(struct vfio_device *device)
{
        int ret;

        mutex_lock(&device->group->group_lock);
        if (!vfio_group_has_iommu(device->group)) {
                ret = -EINVAL;
                goto out_unlock;
        }

        mutex_lock(&device->dev_set->lock);

        /*
         * Before the first device open, get the KVM pointer currently
         * associated with the group (if there is one) and obtain a reference
         * now that will be held until the open_count reaches 0 again.  Save
         * the pointer in the device for use by drivers.
         */
        if (device->open_count == 0)
                vfio_device_group_get_kvm_safe(device);

        ret = vfio_device_open(device, device->group->iommufd);

        if (device->open_count == 0)
                vfio_device_put_kvm(device);

        mutex_unlock(&device->dev_set->lock);

out_unlock:
        mutex_unlock(&device->group->group_lock);
        return ret;
}

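/*
 * Counterpart of vfio_device_group_open(): close the device and drop
 * the KVM reference once open_count returns to zero.
 */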
void vfio_device_group_close(struct vfio_device *device)
{
        mutex_lock(&device->group->group_lock);
        mutex_lock(&device->dev_set->lock);

        vfio_device_close(device, device->group->iommufd);

        if (device->open_count == 0)
                vfio_device_put_kvm(device);

        mutex_unlock(&device->dev_set->lock);
        mutex_unlock(&device->group->group_lock);
}

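/*
 * Open the device and wrap it in a new anonymous file, which is what
 * VFIO_GROUP_GET_DEVICE_FD ultimately hands back to userspace.
 */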
static struct file *vfio_device_open_file(struct vfio_device *device)
{
        struct file *filep;
        int ret;

        ret = vfio_device_group_open(device);
        if (ret)
                goto err_out;

        /*
         * We can't use anon_inode_getfd() because we need to modify
         * the f_mode flags directly to allow more than just ioctls
         */
        filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
                                   device, O_RDWR);
        if (IS_ERR(filep)) {
                ret = PTR_ERR(filep);
                goto err_close_device;
        }

        /*
         * TODO: add an anon_inode interface to do this.
         * Appears to be missing by lack of need rather than
         * explicitly prevented.  Now there's need.
         */
        filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);

        if (device->group->type == VFIO_NO_IOMMU)
                dev_warn(device->dev, "vfio-noiommu device opened by user "
                         "(%s:%d)\n", current->comm, task_pid_nr(current));
        /*
         * On success the ref of device is moved to the file and
         * put in vfio_device_fops_release()
         */
        return filep;

err_close_device:
        vfio_device_group_close(device);
err_out:
        return ERR_PTR(ret);
}

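/*
 * VFIO_GROUP_GET_DEVICE_FD: look up the named device in this group and
 * return a new file descriptor for it.
 */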
static int vfio_group_ioctl_get_device_fd(struct vfio_group *group,
                                          char __user *arg)
{
        struct vfio_device *device;
        struct file *filep;
        char *buf;
        int fdno;
        int ret;

        buf = strndup_user(arg, PAGE_SIZE);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        device = vfio_device_get_from_name(group, buf);
        kfree(buf);
        if (IS_ERR(device))
                return PTR_ERR(device);

        fdno = get_unused_fd_flags(O_CLOEXEC);
        if (fdno < 0) {
                ret = fdno;
                goto err_put_device;
        }

        filep = vfio_device_open_file(device);
        if (IS_ERR(filep)) {
                ret = PTR_ERR(filep);
                goto err_put_fdno;
        }

        fd_install(fdno, filep);
        return fdno;

err_put_fdno:
        put_unused_fd(fdno);
err_put_device:
        vfio_device_put_registration(device);
        return ret;
}

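/*
 * VFIO_GROUP_GET_STATUS: report whether the group is viable and whether
 * an IOMMU backend (container or iommufd) is attached.
 */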
static int vfio_group_ioctl_get_status(struct vfio_group *group,
                                       struct vfio_group_status __user *arg)
{
        unsigned long minsz = offsetofend(struct vfio_group_status, flags);
        struct vfio_group_status status;

        if (copy_from_user(&status, arg, minsz))
                return -EFAULT;

        if (status.argsz < minsz)
                return -EINVAL;

        status.flags = 0;

        mutex_lock(&group->group_lock);
        if (!group->iommu_group) {
                mutex_unlock(&group->group_lock);
                return -ENODEV;
        }

        /*
         * With the container FD the iommu_group_claim_dma_owner() is done
         * during SET_CONTAINER but for iommufd this is done during
         * VFIO_GROUP_GET_DEVICE_FD.  Meaning that with iommufd
         * VFIO_GROUP_FLAGS_VIABLE could be set but GET_DEVICE_FD will fail
         * due to viability.
         */
        if (vfio_group_has_iommu(group))
                status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
                                VFIO_GROUP_FLAGS_VIABLE;
        else if (!iommu_group_dma_owner_claimed(group->iommu_group))
                status.flags |= VFIO_GROUP_FLAGS_VIABLE;
        mutex_unlock(&group->group_lock);

        if (copy_to_user(arg, &status, minsz))
                return -EFAULT;
        return 0;
}

static long vfio_group_fops_unl_ioctl(struct file *filep,
                                      unsigned int cmd, unsigned long arg)
{
        struct vfio_group *group = filep->private_data;
        void __user *uarg = (void __user *)arg;

        switch (cmd) {
        case VFIO_GROUP_GET_DEVICE_FD:
                return vfio_group_ioctl_get_device_fd(group, uarg);
        case VFIO_GROUP_GET_STATUS:
                return vfio_group_ioctl_get_status(group, uarg);
        case VFIO_GROUP_SET_CONTAINER:
                return vfio_group_ioctl_set_container(group, uarg);
        case VFIO_GROUP_UNSET_CONTAINER:
                return vfio_group_ioctl_unset_container(group);
        default:
                return -ENOTTY;
        }
}

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
        struct vfio_group *group =
                container_of(inode->i_cdev, struct vfio_group, cdev);
        int ret;

        mutex_lock(&group->group_lock);

        /*
         * drivers can be zero if this races with vfio_device_remove_group(),
         * it will be stable at 0 under the group_lock.
         */
        if (refcount_read(&group->drivers) == 0) {
                ret = -ENODEV;
                goto out_unlock;
        }

        if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
                ret = -EPERM;
                goto out_unlock;
        }

        /*
         * Do we need multiple instances of the group open?  Seems not.
         */
        if (group->opened_file) {
                ret = -EBUSY;
                goto out_unlock;
        }
        group->opened_file = filep;
        filep->private_data = group;
        ret = 0;
out_unlock:
        mutex_unlock(&group->group_lock);
        return ret;
}

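/*
 * Releasing the group file detaches any container or iommufd still
 * attached.  Device FDs pin the group file, so no devices can be open
 * at this point.
 */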
static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
        struct vfio_group *group = filep->private_data;

        filep->private_data = NULL;

        mutex_lock(&group->group_lock);
        /*
         * Device FDs hold a group file reference, therefore the group release
         * is only called when there are no open devices.
         */
        WARN_ON(group->notifier.head);
        if (group->container)
                vfio_group_detach_container(group);
        if (group->iommufd) {
                iommufd_ctx_put(group->iommufd);
                group->iommufd = NULL;
        }
        group->opened_file = NULL;
        mutex_unlock(&group->group_lock);
        return 0;
}

static const struct file_operations vfio_group_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = vfio_group_fops_unl_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .open           = vfio_group_fops_open,
        .release        = vfio_group_fops_release,
};

/*
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *
vfio_group_find_from_iommu(struct iommu_group *iommu_group)
{
        struct vfio_group *group;

        lockdep_assert_held(&vfio.group_lock);

        /*
         * group->iommu_group from the vfio.group_list cannot be NULL
         * under the vfio.group_lock.
         */
        list_for_each_entry(group, &vfio.group_list, vfio_next) {
                if (group->iommu_group == iommu_group)
                        return group;
        }
        return NULL;
}

static void vfio_group_release(struct device *dev)
{
        struct vfio_group *group = container_of(dev, struct vfio_group, dev);

        mutex_destroy(&group->device_lock);
        mutex_destroy(&group->group_lock);
        WARN_ON(group->iommu_group);
        ida_free(&vfio.group_ida, MINOR(group->dev.devt));
        kfree(group);
}

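/*
 * Allocate and initialize a vfio_group, including its chardev minor.
 * The embedded struct device is freed via vfio_group_release().
 */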
static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
                                           enum vfio_group_type type)
{
        struct vfio_group *group;
        int minor;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        minor = ida_alloc_max(&vfio.group_ida, MINORMASK, GFP_KERNEL);
        if (minor < 0) {
                kfree(group);
                return ERR_PTR(minor);
        }

        device_initialize(&group->dev);
        group->dev.devt = MKDEV(MAJOR(vfio.group_devt), minor);
        group->dev.class = vfio.class;
        group->dev.release = vfio_group_release;
        cdev_init(&group->cdev, &vfio_group_fops);
        group->cdev.owner = THIS_MODULE;

        refcount_set(&group->drivers, 1);
        mutex_init(&group->group_lock);
        spin_lock_init(&group->kvm_ref_lock);
        INIT_LIST_HEAD(&group->device_list);
        mutex_init(&group->device_lock);
        group->iommu_group = iommu_group;
        /* put in vfio_group_release() */
        iommu_group_ref_get(iommu_group);
        group->type = type;
        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

        return group;
}

static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
                                            enum vfio_group_type type)
{
        struct vfio_group *group;
        struct vfio_group *ret;
        int err;

        lockdep_assert_held(&vfio.group_lock);

        group = vfio_group_alloc(iommu_group, type);
        if (IS_ERR(group))
                return group;

        err = dev_set_name(&group->dev, "%s%d",
                           group->type == VFIO_NO_IOMMU ? "noiommu-" : "",
                           iommu_group_id(iommu_group));
        if (err) {
                ret = ERR_PTR(err);
                goto err_put;
        }

        err = cdev_device_add(&group->cdev, &group->dev);
        if (err) {
                ret = ERR_PTR(err);
                goto err_put;
        }

        list_add(&group->vfio_next, &vfio.group_list);

        return group;

err_put:
        put_device(&group->dev);
        return ret;
}

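/*
 * Build a vfio_group on top of a freshly allocated fake iommu_group,
 * used for the noiommu and emulated-IOMMU device types.
 */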
static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
                                                   enum vfio_group_type type)
{
        struct iommu_group *iommu_group;
        struct vfio_group *group;
        int ret;

        iommu_group = iommu_group_alloc();
        if (IS_ERR(iommu_group))
                return ERR_CAST(iommu_group);

        ret = iommu_group_set_name(iommu_group, "vfio-noiommu");
        if (ret)
                goto out_put_group;
        ret = iommu_group_add_device(iommu_group, dev);
        if (ret)
                goto out_put_group;

        mutex_lock(&vfio.group_lock);
        group = vfio_create_group(iommu_group, type);
        mutex_unlock(&vfio.group_lock);
        if (IS_ERR(group)) {
                ret = PTR_ERR(group);
                goto out_remove_device;
        }
        iommu_group_put(iommu_group);
        return group;

out_remove_device:
        iommu_group_remove_device(dev);
out_put_group:
        iommu_group_put(iommu_group);
        return ERR_PTR(ret);
}

static bool vfio_group_has_device(struct vfio_group *group, struct device *dev)
{
        struct vfio_device *device;

        mutex_lock(&group->device_lock);
        list_for_each_entry(device, &group->device_list, group_next) {
                if (device->dev == dev) {
                        mutex_unlock(&group->device_lock);
                        return true;
                }
        }
        mutex_unlock(&group->device_lock);
        return false;
}

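/*
 * Find the vfio_group for the device's iommu_group, taking a driver
 * reference on it, or create a new group if none exists yet.
 */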
static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
{
        struct iommu_group *iommu_group;
        struct vfio_group *group;

        iommu_group = iommu_group_get(dev);
        if (!iommu_group && vfio_noiommu) {
                /*
                 * With noiommu enabled, create an IOMMU group for devices
                 * that don't already have one, implying no IOMMU
                 * hardware/driver exists.  Taint the kernel because we're
                 * about to give a DMA capable device to a user without IOMMU
                 * protection.
                 */
                group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU);
                if (!IS_ERR(group)) {
                        add_taint(TAINT_USER, LOCKDEP_STILL_OK);
                        dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
                }
                return group;
        }

        if (!iommu_group)
                return ERR_PTR(-EINVAL);

        /*
         * VFIO always sets IOMMU_CACHE because we offer no way for userspace
         * to restore cache coherency.  It has to be checked here because it
         * is only valid for cases where we are using iommu groups.
         */
        if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
                iommu_group_put(iommu_group);
                return ERR_PTR(-EINVAL);
        }

        mutex_lock(&vfio.group_lock);
        group = vfio_group_find_from_iommu(iommu_group);
        if (group) {
                if (WARN_ON(vfio_group_has_device(group, dev)))
                        group = ERR_PTR(-EINVAL);
                else
                        refcount_inc(&group->drivers);
        } else {
                group = vfio_create_group(iommu_group, VFIO_IOMMU);
        }
        mutex_unlock(&vfio.group_lock);

        /* The vfio_group holds a reference to the iommu_group */
        iommu_group_put(iommu_group);
        return group;
}

int vfio_device_set_group(struct vfio_device *device,
                          enum vfio_group_type type)
{
        struct vfio_group *group;

        if (type == VFIO_IOMMU)
                group = vfio_group_find_or_alloc(device->dev);
        else
                group = vfio_noiommu_group_alloc(device->dev, type);

        if (IS_ERR(group))
                return PTR_ERR(group);

        /* Our reference on group is moved to the device */
        device->group = group;
        return 0;
}

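/*
 * Undo vfio_device_set_group(): drop the driver reference and, for the
 * last driver, tear down the group's cdev and iommu_group linkage.
 */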
void vfio_device_remove_group(struct vfio_device *device)
{
        struct vfio_group *group = device->group;
        struct iommu_group *iommu_group;

        if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
                iommu_group_remove_device(device->dev);

        /* Pairs with vfio_create_group() / vfio_group_find_or_alloc() */
        if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock))
                return;
        list_del(&group->vfio_next);

        /*
         * We could concurrently probe another driver in the group that might
         * race vfio_device_remove_group() with vfio_group_find_or_alloc(), so
         * we have to ensure that the sysfs is all cleaned up under lock
         * otherwise the cdev_device_add() will fail due to the name already
         * existing.
         */
        cdev_device_del(&group->cdev, &group->dev);

        mutex_lock(&group->group_lock);
        /*
         * These data structures all have paired operations that can only be
         * undone when the caller holds a live reference on the device.  Since
         * all pairs must be undone these WARN_ON's indicate some caller did
         * not properly hold the group reference.
         */
        WARN_ON(!list_empty(&group->device_list));
        WARN_ON(group->notifier.head);

        /*
         * Revoke all users of group->iommu_group.  At this point we know
         * there are no devices active because we are unplugging the last one.
         * Setting iommu_group to NULL blocks all new users.
         */
        if (group->container)
                vfio_group_detach_container(group);
        iommu_group = group->iommu_group;
        group->iommu_group = NULL;
        mutex_unlock(&group->group_lock);
        mutex_unlock(&vfio.group_lock);

        iommu_group_put(iommu_group);
        put_device(&group->dev);
}

void vfio_device_group_register(struct vfio_device *device)
{
        mutex_lock(&device->group->device_lock);
        list_add(&device->group_next, &device->group->device_list);
        mutex_unlock(&device->group->device_lock);
}

void vfio_device_group_unregister(struct vfio_device *device)
{
        mutex_lock(&device->group->device_lock);
        list_del(&device->group_next);
        mutex_unlock(&device->group->device_lock);
}

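/*
 * Make the device a user of the group's container: take a container
 * user reference and register the device with it.
 */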
int vfio_device_group_use_iommu(struct vfio_device *device)
{
        struct vfio_group *group = device->group;
        int ret = 0;

        lockdep_assert_held(&group->group_lock);

        if (WARN_ON(!group->container))
                return -EINVAL;

        ret = vfio_group_use_container(group);
        if (ret)
                return ret;
        vfio_device_container_register(device);
        return 0;
}

void vfio_device_group_unuse_iommu(struct vfio_device *device)
{
        struct vfio_group *group = device->group;

        lockdep_assert_held(&group->group_lock);

        if (WARN_ON(!group->container))
                return;

        vfio_device_container_unregister(device);
        vfio_group_unuse_container(group);
}

bool vfio_device_has_container(struct vfio_device *device)
{
        return device->group->container;
}

/**
 * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
 * @file: VFIO group file
 *
 * The returned iommu_group is valid as long as a ref is held on the file.
 * This returns a reference on the group.  This function is deprecated, only
 * the SPAPR path in kvm should call it.
 */
struct iommu_group *vfio_file_iommu_group(struct file *file)
{
        struct vfio_group *group = file->private_data;
        struct iommu_group *iommu_group = NULL;

        if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
                return NULL;

        if (!vfio_file_is_group(file))
                return NULL;

        mutex_lock(&group->group_lock);
        if (group->iommu_group) {
                iommu_group = group->iommu_group;
                iommu_group_ref_get(iommu_group);
        }
        mutex_unlock(&group->group_lock);
        return iommu_group;
}
EXPORT_SYMBOL_GPL(vfio_file_iommu_group);

/**
 * vfio_file_is_group - True if the file is usable with VFIO APIs
 * @file: VFIO group file
 */
bool vfio_file_is_group(struct file *file)
{
        return file->f_op == &vfio_group_fops;
}
EXPORT_SYMBOL_GPL(vfio_file_is_group);

/**
 * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
 *        is always CPU cache coherent
 * @file: VFIO group file
 *
 * Enforced coherency means that the IOMMU ignores things like the PCIe
 * no-snoop bit in DMA transactions.  A return of false indicates that the
 * user has rights to access additional instructions such as wbinvd on x86.
 */
bool vfio_file_enforced_coherent(struct file *file)
{
        struct vfio_group *group = file->private_data;
        struct vfio_device *device;
        bool ret = true;

        if (!vfio_file_is_group(file))
                return true;

        /*
         * If the device does not have IOMMU_CAP_ENFORCE_CACHE_COHERENCY then
         * any domain later attached to it will also not support it.  If the
         * cap is set then the iommu_domain eventually attached to the
         * device/group must use a domain with enforce_cache_coherency().
         */
        mutex_lock(&group->device_lock);
        list_for_each_entry(device, &group->device_list, group_next) {
                if (!device_iommu_capable(device->dev,
                                          IOMMU_CAP_ENFORCE_CACHE_COHERENCY)) {
                        ret = false;
                        break;
                }
        }
        mutex_unlock(&group->device_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);

/**
 * vfio_file_set_kvm - Link a kvm with VFIO drivers
 * @file: VFIO group file
 * @kvm: KVM to link
 *
 * When a VFIO device is first opened the KVM will be available in
 * device->kvm if one was associated with the group.
 */
void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
        struct vfio_group *group = file->private_data;

        if (!vfio_file_is_group(file))
                return;

        spin_lock(&group->kvm_ref_lock);
        group->kvm = kvm;
        spin_unlock(&group->kvm_ref_lock);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);

/**
 * vfio_file_has_dev - True if the VFIO file is a handle for device
 * @file: VFIO file to check
 * @device: Device that must be part of the file
 *
 * Returns true if given file has permission to manipulate the given device.
 */
bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
{
        struct vfio_group *group = file->private_data;

        if (!vfio_file_is_group(file))
                return false;

        return group == device->group;
}
EXPORT_SYMBOL_GPL(vfio_file_has_dev);

static char *vfio_devnode(const struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

int __init vfio_group_init(void)
{
        int ret;

        ida_init(&vfio.group_ida);
        mutex_init(&vfio.group_lock);
        INIT_LIST_HEAD(&vfio.group_list);

        ret = vfio_container_init();
        if (ret)
                return ret;

        /* /dev/vfio/$GROUP */
        vfio.class = class_create("vfio");
        if (IS_ERR(vfio.class)) {
                ret = PTR_ERR(vfio.class);
                goto err_group_class;
        }

        vfio.class->devnode = vfio_devnode;

        ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
        if (ret)
                goto err_alloc_chrdev;
        return 0;

err_alloc_chrdev:
        class_destroy(vfio.class);
        vfio.class = NULL;
err_group_class:
        vfio_container_cleanup();
        return ret;
}

void vfio_group_cleanup(void)
{
        WARN_ON(!list_empty(&vfio.group_list));
        ida_destroy(&vfio.group_ida);
        unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
        class_destroy(vfio.class);
        vfio.class = NULL;
        vfio_container_cleanup();
}