// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions.  And thanks to Cunming Liang and Zhihong Wang for all
 * the support.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>

#include "vhost.h"
enum {
	VHOST_VDPA_BACKEND_FEATURES =
		(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
		(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
		(1ULL << VHOST_BACKEND_F_IOTLB_ASID),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

#define VHOST_VDPA_IOTLB_BUCKETS 16
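/*
 * Usage sketch (userspace, illustrative only, not part of this driver):
 * a VMM negotiates the backend features above through the
 * VHOST_GET_BACKEND_FEATURES / VHOST_SET_BACKEND_FEATURES ioctls before
 * sending any IOTLB message, assuming "fd" is an open /dev/vhost-vdpa-N
 * descriptor:
 *
 *	__u64 features;
 *
 *	ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features);
 *	features &= (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
 *		    (1ULL << VHOST_BACKEND_F_IOTLB_BATCH);
 *	ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features);
 *
 * Error handling is omitted; which features to keep is up to the VMM.
 */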
struct vhost_vdpa_as {
	struct hlist_node hash_link;
	struct vhost_iotlb iotlb;
	u32 id;
};

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	u32 nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
	u32 batch_asid;
};
static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid);

static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
{
	struct vhost_vdpa_as *as = container_of(iotlb, struct
						vhost_vdpa_as, iotlb);
	return as->id;
}
static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as *as;

	hlist_for_each_entry(as, head, hash_link)
		if (as->id == asid)
			return as;

	return NULL;
}

static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (!as)
		return NULL;

	return &as->iotlb;
}
static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as *as;

	if (asid_to_as(v, asid))
		return NULL;

	if (asid >= v->vdpa->nas)
		return NULL;

	as = kmalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	vhost_iotlb_init(&as->iotlb, 0, 0);
	as->id = asid;
	hlist_add_head(&as->hash_link, head);

	return as;
}

static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
						      u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (as)
		return as;

	return vhost_vdpa_alloc_as(v, asid);
}
static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (!as)
		return -EINVAL;

	hlist_del(&as->hash_link);
	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
	kfree(as);

	return 0;
}
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	if (irq < 0)
		return;

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}
static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	v->in_batch = 0;

	return vdpa_reset(vdpa);
}

static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!vdpa->use_va || !ops->bind_mm)
		return 0;

	return ops->bind_mm(vdpa, v->vdev.mm);
}

static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!vdpa->use_va || !ops->unbind_mm)
		return;

	ops->unbind_mm(vdpa);
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	u32 nvqs = v->nvqs;
	int ret;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless reset the
	 * status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	if (status == 0) {
		ret = vdpa_reset(vdpa);
		if (ret)
			return ret;
	} else
		vdpa_set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	return 0;
}
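/*
 * Usage sketch (userspace, illustrative only, not part of this driver):
 * a VMM typically walks the virtio status machine through
 * VHOST_VDPA_SET_STATUS, only ever adding bits, and clears everything by
 * writing 0. Assuming "fd" is an open vhost-vdpa descriptor:
 *
 *	__u8 status = 0;
 *
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);		// reset
 *	status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
 *	// ... negotiate features, then add VIRTIO_CONFIG_S_FEATURES_OK ...
 *	status |= VIRTIO_CONFIG_S_DRIVER_OK;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
 */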
static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	size_t size = vdpa->config->get_config_size(vdpa);

	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}
static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;

	buf = vmemdup_user(c->buf, config.len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	vdpa_set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}
static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->suspend;
}

static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->resume;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_device_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}
static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}
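/*
 * Usage sketch (userspace, illustrative only, not part of this driver):
 * the config-change interrupt is armed by handing an eventfd to
 * VHOST_VDPA_SET_CONFIG_CALL, and disarmed by passing VHOST_FILE_UNBIND.
 * Assuming "fd" is an open vhost-vdpa descriptor:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	ioctl(fd, VHOST_VDPA_SET_CONFIG_CALL, &efd);	// install
 *	// ... poll/read efd to observe config changes ...
 *	efd = VHOST_FILE_UNBIND;
 *	ioctl(fd, VHOST_VDPA_SET_CONFIG_CALL, &efd);	// remove
 */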
static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
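/*
 * Usage sketch (userspace, illustrative only, not part of this driver):
 * before programming IOTLB mappings, a VMM can query the usable IOVA
 * window and keep every mapping inside [first, last]:
 *
 *	struct vhost_vdpa_iova_range r;
 *
 *	ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, &r);
 *	// only map IOVAs where r.first <= iova && iova + size - 1 <= r.last
 */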
static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 size;

	size = ops->get_config_size(vdpa);

	if (copy_to_user(argp, &size, sizeof(size)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
		return -EFAULT;

	return 0;
}
/* After a successful return of ioctl the device must not process more
 * virtqueue descriptors. The device can answer to read or writes of config
 * fields as if it were not suspended. In particular, writing to "queue_enable"
 * with a value of 1 will not make the device start processing buffers.
 */
static long vhost_vdpa_suspend(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!ops->suspend)
		return -EOPNOTSUPP;

	return ops->suspend(vdpa);
}
/* After a successful return of this ioctl the device resumes processing
 * virtqueue descriptors. The device becomes fully operational the same way it
 * was before it was suspended.
 */
static long vhost_vdpa_resume(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!ops->resume)
		return -EOPNOTSUPP;

	return ops->resume(vdpa);
}
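/*
 * Usage sketch (userspace, illustrative only, not part of this driver):
 * both ioctls take no argument. A minimal stop/resume cycle, assuming the
 * backend advertised VHOST_BACKEND_F_SUSPEND and VHOST_BACKEND_F_RESUME:
 *
 *	if (ioctl(fd, VHOST_VDPA_SUSPEND) == 0) {
 *		// device stops processing descriptors; save vq state here
 *		ioctl(fd, VHOST_VDPA_RESUME);
 *	}
 */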
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_VDPA_GET_VRING_GROUP:
		if (!ops->get_vq_group)
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_group(vdpa, idx);
		if (s.num >= vdpa->ngroups)
			return -EIO;
		else if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_VDPA_SET_GROUP_ASID:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		if (s.num >= vdpa->nas)
			return -EINVAL;
		if (!ops->set_group_asid)
			return -EOPNOTSUPP;
		return ops->set_group_asid(vdpa, idx, s.num);
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.split.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.split.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
			cb.trigger = vq->call_ctx.ctx;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
			cb.trigger = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}
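/*
 * Usage sketch (userspace, illustrative only, not part of this driver):
 * bringing up one virtqueue mixes the generic vhost vring ioctls with the
 * vdpa-specific enable. A rough sequence for queue 0, error handling and
 * the exact ring addresses omitted:
 *
 *	struct vhost_vring_state s = { .index = 0, .num = 256 };
 *	struct vhost_vring_addr  a = { .index = 0 };	// plus desc/avail/used
 *
 *	ioctl(fd, VHOST_SET_VRING_NUM, &s);
 *	s.num = 0;
 *	ioctl(fd, VHOST_SET_VRING_BASE, &s);		// last_avail_idx
 *	ioctl(fd, VHOST_SET_VRING_ADDR, &a);
 *	s.num = 1;
 *	ioctl(fd, VHOST_VDPA_SET_VRING_ENABLE, &s);	// queue_enable
 */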
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
				 BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
				 BIT_ULL(VHOST_BACKEND_F_RESUME)))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
		     !vhost_vdpa_can_suspend(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
		     !vhost_vdpa_can_resume(v))
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_VDPA_GET_GROUP_NUM:
		if (copy_to_user(argp, &v->vdpa->ngroups,
				 sizeof(v->vdpa->ngroups)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_AS_NUM:
		if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
			r = -EFAULT;
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (vhost_vdpa_can_suspend(v))
			features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
		if (vhost_vdpa_can_resume(v))
			features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG_SIZE:
		r = vhost_vdpa_get_config_size(v, argp);
		break;
	case VHOST_VDPA_GET_VQS_COUNT:
		r = vhost_vdpa_get_vqs_count(v, argp);
		break;
	case VHOST_VDPA_SUSPEND:
		r = vhost_vdpa_suspend(v);
		break;
	case VHOST_VDPA_RESUME:
		r = vhost_vdpa_resume(v);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	if (r)
		goto out;

	switch (cmd) {
	case VHOST_SET_OWNER:
		r = vhost_vdpa_bind_mm(v);
		if (r)
			vhost_dev_reset_owner(d, NULL);
		break;
	}
out:
	mutex_unlock(&d->mutex);
	return r;
}
static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
				     struct vhost_iotlb_map *map, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, asid, map->start, map->size);
	} else if (ops->set_map == NULL) {
		iommu_unmap(v->domain, map->start, map->size);
	}
}

static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = PFN_DOWN(map->size);
		for (pfn = PFN_DOWN(map->addr);
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (vdpa->use_va)
		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);

	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
}
static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}
static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);
	int r = 0;

	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
				      pa, perm, opaque);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, asid, iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm), GFP_KERNEL);
	}
	if (r) {
		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
		return r;
	}

	if (!vdpa->use_va)
		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);

	return 0;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);

	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);

	if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, asid, iotlb);
	}
}
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	u64 offset, map_size, map_iova = iova;
	struct vdpa_map_file *map_file;
	struct vm_area_struct *vma;
	int ret = 0;

	mmap_read_lock(dev->mm);

	while (size) {
		vma = find_vma(dev->mm, uaddr);
		if (!vma) {
			ret = -EINVAL;
			break;
		}
		map_size = min(size, vma->vm_end - uaddr);
		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
			!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
			goto next;

		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
		if (!map_file) {
			ret = -ENOMEM;
			break;
		}
		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
		map_file->offset = offset;
		map_file->file = get_file(vma->vm_file);
		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
				     perm, map_file);
		if (ret) {
			fput(map_file->file);
			kfree(map_file);
			break;
		}
next:
		size -= map_size;
		uaddr += map_size;
		map_iova += map_size;
	}
	if (ret)
		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);

	mmap_read_unlock(dev->mm);

	return ret;
}
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 start = iova;
	long pinned;
	int ret = 0;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PFN_UP(size + (iova & ~PAGE_MASK));
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Pin a contiguous chunk of memory */
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = vhost_vdpa_map(v, iotlb, iova, csize,
						     PFN_PHYS(map_pfn),
						     perm, NULL);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += PFN_PHYS(pinned);
		npages -= pinned;
	}

	/* Pin the rest chunk */
	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
			     PFN_PHYS(map_pfn), perm, NULL);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vdpa_map(), hence
			 * the corresponding unpinning will be handled by
			 * vdpa_unmap().
			 */
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, iotlb, start, size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb *iotlb,
					   struct vhost_iotlb_msg *msg)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	if (vdpa->use_va)
		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
					 msg->uaddr, msg->perm);

	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_iotlb *iotlb = NULL;
	struct vhost_vdpa_as *as = NULL;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	if (msg->type == VHOST_IOTLB_UPDATE ||
	    msg->type == VHOST_IOTLB_BATCH_BEGIN) {
		as = vhost_vdpa_find_alloc_as(v, asid);
		if (!as) {
			dev_err(&v->dev, "can't find and alloc asid %d\n",
				asid);
			r = -EINVAL;
			goto unlock;
		}
		iotlb = &as->iotlb;
	} else
		iotlb = asid_to_iotlb(v, asid);

	if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
		if (v->in_batch && v->batch_asid != asid) {
			dev_info(&v->dev, "batch id %d asid %d\n",
				 v->batch_asid, asid);
		}
		if (!iotlb)
			dev_err(&v->dev, "no iotlb for asid %d\n", asid);
		r = -EINVAL;
		goto unlock;
	}

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->batch_asid = asid;
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, asid, iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}
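/*
 * Usage sketch (userspace, illustrative only, not part of this driver):
 * IOTLB messages are not ioctls; userspace writes struct vhost_msg_v2 to
 * the vhost-vdpa fd. A batched update, assuming the backend accepted
 * VHOST_BACKEND_F_IOTLB_MSG_V2 and VHOST_BACKEND_F_IOTLB_BATCH, and that
 * iova/size/uaddr describe the region to map:
 *
 *	struct vhost_msg_v2 msg = { .type = VHOST_IOTLB_MSG_V2, .asid = 0 };
 *
 *	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
 *	write(fd, &msg, sizeof(msg));
 *	msg.iotlb = (struct vhost_iotlb_msg) {
 *		.iova = iova, .size = size, .uaddr = uaddr,
 *		.perm = VHOST_ACCESS_RW, .type = VHOST_IOTLB_UPDATE,
 *	};
 *	write(fd, &msg, sizeof(msg));
 *	msg.iotlb.type = VHOST_IOTLB_BATCH_END;
 *	write(fd, &msg, sizeof(msg));
 */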
static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	const struct bus_type *bus;
	int ret;

	/* Device want to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
		dev_warn_once(&v->dev,
			      "Failed to allocate domain, device is not IOMMU cache coherent capable\n");
		return -ENOTSUPP;
	}

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	v->domain = NULL;
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain && v->domain->geometry.force_aperture) {
		range->first = v->domain->geometry.aperture_start;
		range->last = v->domain->geometry.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
{
	struct vhost_vdpa_as *as;
	u32 asid;

	for (asid = 0; asid < v->vdpa->nas; asid++) {
		as = asid_to_as(v, asid);
		if (as)
			vhost_vdpa_remove_as(v, asid);
	}

	vhost_vdpa_free_domain(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
}
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int r, opened;
	u32 i, nvqs;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	r = vhost_vdpa_reset(v);
	if (r)
		goto err;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_alloc_domain;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_alloc_domain:
	vhost_vdpa_cleanup(v);
err:
	atomic_dec(&v->opened);
	return r;
}
static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	u32 i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_clean_irq(v);
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_unbind_mm(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_cleanup(v);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    PFN_DOWN(notify.addr), PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};
static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support the doorbell which sits on the page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */
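/*
 * Usage sketch (userspace, illustrative only, not part of this driver):
 * with the constraints above satisfied, userspace maps the doorbell of
 * virtqueue "qid" by using the queue index as the page offset, write-only
 * and shared:
 *
 *	void *db = mmap(NULL, getpagesize(), PROT_WRITE, MAP_SHARED,
 *			fd, (off_t)qid * getpagesize());
 *	if (db != MAP_FAILED)
 *		*(volatile __u16 *)db = qid;	// kick by writing the vq index
 *
 * The value and width of the write depend on the device's notification
 * layout; the vq-index write shown here is just the common virtio-pci style.
 */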
static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int i, r;

	/* We can't support platform IOMMU device with more than 1
	 * group or as
	 */
	if (!ops->set_map && !ops->dma_map &&
	    (vdpa->ngroups > 1 || vdpa->nas > 1))
		return -EOPNOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
		INIT_HLIST_HEAD(&v->as[i]);

	return 0;

err:
	put_device(&v->dev);
	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	return r;
}
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}
static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);
static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");