/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)	(off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index)	((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK		(((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
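
/*
 * Descriptive note (not in the original source): these macros pack a VFIO
 * region index into the upper bits of a 64-bit file offset, leaving the low
 * 40 bits as the offset within that region.  For example, index 2 (the BAR2
 * region) maps to VFIO_PCI_INDEX_TO_OFFSET(2) = 2ULL << 40 = 0x200_0000_0000,
 * and VFIO_PCI_OFFSET_MASK recovers the in-region offset from any such
 * file offset.
 */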
#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);

	const struct intel_vgpu_regops	*ops;

	struct hlist_node hnode;

struct kvmgt_guest_info {

	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
static inline bool handle_valid(unsigned long handle)
	return !!(handle & ~0xff);

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,

	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	if (unlikely(!pfn_valid(pfn)))

	page = pfn_to_page(pfn);
	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
			PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr))

	*iova = (unsigned long)(daddr >> PAGE_SHIFT);

static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
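
/*
 * Descriptive note (not in the original source): each vGPU keeps an rbtree
 * (vdev.cache, protected by vdev.cache_lock) of gvt_dma entries keyed by
 * guest frame number.  The helpers below look up, insert and remove cached
 * gfn -> host-IOVA translations for guest pages pinned on the vGPU's behalf.
 */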
static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
	struct rb_node *node = vgpu->vdev.cache.rb_node;
	struct gvt_dma *ret = NULL;

		struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);

			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;

static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
	struct gvt_dma *entry;

	mutex_lock(&vgpu->vdev.cache_lock);

	entry = __gvt_cache_find(vgpu, gfn);
	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;

	mutex_unlock(&vgpu->vdev.cache_lock);

static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,

	struct gvt_dma *new, *itr;
	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);

	mutex_lock(&vgpu->vdev.cache_lock);

		itr = rb_entry(parent, struct gvt_dma, node);

		else if (gfn < itr->gfn)
			link = &parent->rb_left;

			link = &parent->rb_right;

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &vgpu->vdev.cache);
	mutex_unlock(&vgpu->vdev.cache_lock);

	mutex_unlock(&vgpu->vdev.cache_lock);

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
		struct gvt_dma *entry)
	rb_erase(&entry->node, &vgpu->vdev.cache);

static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	struct gvt_dma *this;

	mutex_lock(&vgpu->vdev.cache_lock);
	this = __gvt_cache_find(vgpu, gfn);

		mutex_unlock(&vgpu->vdev.cache_lock);

	gvt_dma_unmap_iova(vgpu, this->iova);
	rc = vfio_unpin_pages(dev, &g1, 1);

	__gvt_cache_remove_entry(vgpu, this);
	mutex_unlock(&vgpu->vdev.cache_lock);

static void gvt_cache_init(struct intel_vgpu *vgpu)
	vgpu->vdev.cache = RB_ROOT;
	mutex_init(&vgpu->vdev.cache_lock);

static void gvt_cache_destroy(struct intel_vgpu *vgpu)

	struct rb_node *node = NULL;
	struct device *dev = mdev_dev(vgpu->vdev.mdev);

		mutex_lock(&vgpu->vdev.cache_lock);
		node = rb_first(&vgpu->vdev.cache);

			mutex_unlock(&vgpu->vdev.cache_lock);

		dma = rb_entry(node, struct gvt_dma, node);
		gvt_dma_unmap_iova(vgpu, dma->iova);

		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->vdev.cache_lock);
		vfio_unpin_pages(dev, &gfn, 1);
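
/*
 * Descriptive note (not in the original source): the kvmgt_protect_table_*
 * helpers below maintain a hash table of guest frame numbers whose pages
 * GVT has asked KVM to write-protect through the page-track API, so that
 * guest writes to shadowed page-table pages can be trapped and forwarded
 * to the GVT write-protect handler.
 */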
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
	hash_init(info->ptable);

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,

	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))

	hash_add(info->ptable, &p->hnode, gfn);

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,

	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
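
/*
 * Descriptive note (not in the original source): the functions below back
 * the extra, device-specific VFIO regions of the vGPU.  Currently one such
 * region is registered: a read-only Intel IGD OpRegion region through which
 * VFIO userspace can read the virtual OpRegion contents.
 */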
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)

	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vgpu->vdev.region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vgpu->vdev.region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");

	count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
	memcpy(buf, base + pos, count);

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,

static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)

	struct vfio_region *region;

	region = krealloc(vgpu->vdev.region,
			(vgpu->vdev.num_regions + 1) * sizeof(*region),

	vgpu->vdev.region = region;
	vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
	vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
	vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
	vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
	vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
	vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
	vgpu->vdev.num_regions++;

static int kvmgt_get_vfio_device(void *p_vgpu)
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

	vgpu->vdev.vfio_device = vfio_device_get_from_dev(
		mdev_dev(vgpu->vdev.mdev));
	if (!vgpu->vdev.vfio_device) {
		gvt_vgpu_err("failed to get vfio device\n");

static int kvmgt_set_opregion(void *p_vgpu)
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	/*
	 * Each vgpu has its own OpRegion, even though VFIO will create
	 * another one later.  This one is used to expose the OpRegion to
	 * VFIO; the one VFIO creates later is what the guest actually uses.
	 */
	base = vgpu_opregion(vgpu)->va;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

static void kvmgt_put_vfio_device(void *vgpu)
	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))

	vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);

static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));

		gvt_vgpu_err("failed to find type %s to create\n",

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));

static int intel_vgpu_remove(struct mdev_device *mdev)
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))

	intel_gvt_ops->vgpu_destroy(vgpu);

static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
	struct intel_vgpu *vgpu = container_of(nb,
			vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		unsigned long gfn, end_gfn;

		gfn = unmap->iova >> PAGE_SHIFT;
		end_gfn = gfn + unmap->size / PAGE_SIZE;

		while (gfn < end_gfn)
			gvt_cache_remove(vgpu, gfn++);

static int intel_vgpu_group_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
	struct intel_vgpu *vgpu = container_of(nb,
			vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

			schedule_work(&vgpu->vdev.release_work);
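
/*
 * Descriptive note (not in the original source): intel_vgpu_open() runs when
 * userspace opens the mdev device.  It registers the IOMMU notifier (to drop
 * cached pinnings on DMA unmap) and the group notifier (to learn which KVM
 * instance owns the device), initializes guest state via kvmgt_guest_init(),
 * and finally activates the vGPU for scheduling.
 */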
static int intel_vgpu_open(struct mdev_device *mdev)
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vgpu->vdev.iommu_notifier);

		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vgpu->vdev.group_notifier);

		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",

	ret = kvmgt_guest_init(mdev);

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vgpu->vdev.released, 0);

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
				&vgpu->vdev.group_notifier);

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				&vgpu->vdev.iommu_notifier);

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
	struct kvmgt_guest_info *info;

	if (!handle_valid(vgpu->handle))

	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))

	intel_gvt_ops->vgpu_deactivate(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	vgpu->vdev.kvm = NULL;

static void intel_vgpu_release(struct mdev_device *mdev)
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);

static void intel_vgpu_release_work(struct work_struct *work)
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,

	__intel_vgpu_release(vgpu);

static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
	u32 start_lo, start_hi;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space

	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */

		/* mem unknown type treated as 32-bit BAR */

	return ((u64)start_hi << 32) | start_lo;
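
/*
 * Descriptive note (not in the original source): intel_vgpu_bar_rw() below
 * converts a region-relative offset into a guest address by adding the BAR
 * base read from the virtual PCI config space above, then forwards the
 * access to the GVT MMIO emulation callbacks.
 */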
static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
		void *buf, unsigned int count, bool is_write)

	uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);

		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);

		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);

static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
		void *buf, unsigned long count, bool is_write)

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);

	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));

		memcpy(aperture_va + offset_in_page(off), buf, count);

		memcpy(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);
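
/*
 * Descriptive note (not in the original source): intel_vgpu_rw() is the
 * common backend for the mdev read/write callbacks.  It decodes the region
 * index from the file offset (see the VFIO_PCI_OFFSET_* macros above) and
 * dispatches to config-space emulation, BAR0 MMIO emulation, the BAR2
 * aperture, or a device-specific region's ->rw() handler.
 */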
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
		size_t count, loff_t *ppos, bool is_write)

	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);

	case VFIO_PCI_CONFIG_REGION_INDEX:
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,

			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,

	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);

	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);

	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:

		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)

		index -= VFIO_PCI_NUM_REGIONS;
		return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,

	return ret == 0 ? count : ret;

static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
		size_t count, loff_t *ppos)

	unsigned int done = 0;

		if (count >= 4 && !(*ppos % 4)) {

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),

			if (copy_to_user(buf, &val, sizeof(val)))

		} else if (count >= 2 && !(*ppos % 2)) {

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),

			if (copy_to_user(buf, &val, sizeof(val)))

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,

			if (copy_to_user(buf, &val, sizeof(val)))

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
		const char __user *buf,
		size_t count, loff_t *ppos)

	unsigned int done = 0;

		if (count >= 4 && !(*ppos % 4)) {

			if (copy_from_user(&val, buf, sizeof(val)))

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),

		} else if (count >= 2 && !(*ppos % 2)) {

			if (copy_from_user(&val, buf, sizeof(val)))

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);

			if (copy_from_user(&val, buf, sizeof(val)))

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),

static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)

	unsigned long req_size, pgoff = 0;

	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)

	if (vma->vm_end < vma->vm_start)

	if ((vma->vm_flags & VM_SHARED) == 0)

	if (index != VFIO_PCI_BAR2_REGION_INDEX)

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start,
		unsigned int count, uint32_t flags,

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start,
		unsigned int count, uint32_t flags, void *data)

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)

	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);

		vgpu->vdev.msi_trigger = trigger;

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
		unsigned int index, unsigned int start, unsigned int count,

	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, uint32_t flags,

	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;

		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;

		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;

	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */

		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;

	return func(vgpu, index, start, count, flags, data);
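
/*
 * Descriptive note (not in the original source): intel_vgpu_ioctl() below
 * implements the VFIO device ioctls for the vGPU: VFIO_DEVICE_GET_INFO,
 * VFIO_DEVICE_GET_REGION_INFO (including the sparse-mmap capability for the
 * BAR2 aperture), VFIO_DEVICE_GET_IRQ_INFO, VFIO_DEVICE_SET_IRQS,
 * VFIO_DEVICE_RESET, and the GFX plane/dmabuf queries.
 */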
static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,

	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))

		if (info.argsz < minsz)

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS +
				vgpu->vdev.num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };

		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))

		if (info.argsz < minsz)

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->gvt->device_info.cfg_space_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
					VFIO_REGION_INFO_FLAG_MMAP |
					VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			size = sizeof(*sparse) +
					(nr_areas * sizeof(*sparse->areas));
			sparse = kzalloc(size, GFP_KERNEL);

			sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->header.version = 1;
			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);

		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

			gvt_dbg_core("get region info bar:%d\n", info.index);

		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

			gvt_dbg_core("get region info index:%d\n", info.index);

			struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

			if (info.index >= VFIO_PCI_NUM_REGIONS +
					vgpu->vdev.num_regions)

			i = info.index - VFIO_PCI_NUM_REGIONS;

					VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->vdev.region[i].size;
			info.flags = vgpu->vdev.region[i].flags;

			cap_type.type = vgpu->vdev.region[i].type;
			cap_type.subtype = vgpu->vdev.region[i].subtype;

			ret = vfio_info_add_capability(&caps,

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					&sparse->header, sizeof(*sparse) +
					sizeof(*sparse->areas)));

			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;

				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,

				info.cap_offset = sizeof(info);

		return copy_to_user((void __user *)arg, &info, minsz) ?

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);

			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;

		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);

				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");

			data = memdup_user((void __user *)(arg + minsz),

				return PTR_ERR(data);

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					hdr.start, hdr.count, data);

	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);

	} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
		struct vfio_device_gfx_plane_info dmabuf;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,

		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))

		if (dmabuf.argsz < minsz)

		ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);

		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?

	} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {

		if (get_user(dmabuf_id, (__u32 __user *)arg))

		dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);

vgpu_id_show(struct device *dev, struct device_attribute *attr,

	struct mdev_device *mdev = mdev_from_dev(dev);

		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%d\n", vgpu->id);

	return sprintf(buf, "\n");

hw_id_show(struct device *dev, struct device_attribute *attr,

	struct mdev_device *mdev = mdev_from_dev(dev);

		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%u\n",
			       vgpu->submission.shadow_ctx->hw_id);

	return sprintf(buf, "\n");

static DEVICE_ATTR_RO(vgpu_id);
static DEVICE_ATTR_RO(hw_id);

static struct attribute *intel_vgpu_attrs[] = {
	&dev_attr_vgpu_id.attr,
	&dev_attr_hw_id.attr,

static const struct attribute_group intel_vgpu_group = {
	.name = "intel_vgpu",
	.attrs = intel_vgpu_attrs,

static const struct attribute_group *intel_vgpu_groups[] = {

static struct mdev_parent_ops intel_vgpu_ops = {
	.mdev_attr_groups = intel_vgpu_groups,
	.create = intel_vgpu_create,
	.remove = intel_vgpu_remove,

	.open = intel_vgpu_open,
	.release = intel_vgpu_release,

	.read = intel_vgpu_read,
	.write = intel_vgpu_write,
	.mmap = intel_vgpu_mmap,
	.ioctl = intel_vgpu_ioctl,
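
/*
 * Descriptive note (not in the original source): intel_vgpu_ops is the
 * mdev_parent_ops table handed to mdev_register_device() in kvmgt_host_init()
 * below; it is how the mediated-device framework routes create/remove,
 * open/release, read/write/mmap and ioctl calls on a vGPU mdev to the
 * callbacks defined above.
 */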
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)

	struct attribute **kvm_type_attrs;
	struct attribute_group **kvm_vgpu_type_groups;

	intel_gvt_ops = ops;
	if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
			&kvm_vgpu_type_groups))

	intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;

	return mdev_register_device(dev, &intel_vgpu_ops);

static void kvmgt_host_exit(struct device *dev, void *gvt)
	mdev_unregister_device(dev);

static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
	struct kvmgt_guest_info *info;

	struct kvm_memory_slot *slot;

	if (!handle_valid(handle))

	info = (struct kvmgt_guest_info *)handle;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);

		srcu_read_unlock(&kvm->srcu, idx);

	spin_lock(&kvm->mmu_lock);

	if (kvmgt_gfn_is_write_protected(info, gfn))

	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_add(info, gfn);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
	struct kvmgt_guest_info *info;

	struct kvm_memory_slot *slot;

	if (!handle_valid(handle))

	info = (struct kvmgt_guest_info *)handle;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);

		srcu_read_unlock(&kvm->srcu, idx);

	spin_lock(&kvm->mmu_lock);

	if (!kvmgt_gfn_is_write_protected(info, gfn))

	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_del(info, gfn);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
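
/*
 * Descriptive note (not in the original source): the two callbacks below are
 * registered with KVM's page-track notifier in kvmgt_guest_init().
 * track_write forwards guest writes to write-protected pages to the GVT
 * handler, and track_flush_slot drops the write protection when a memslot
 * goes away.
 */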
static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)

	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		struct kvm_page_track_notifier_node *node)

	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < slot->npages; i++) {
		gfn = slot->base_gfn + i;
		if (kvmgt_gfn_is_write_protected(info, gfn)) {
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
			kvmgt_protect_table_del(info, gfn);

	spin_unlock(&kvm->mmu_lock);

static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
	struct intel_vgpu *itr;
	struct kvmgt_guest_info *info;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!handle_valid(itr->handle))

		info = (struct kvmgt_guest_info *)itr->handle;
		if (kvm && kvm == info->kvm) {

	mutex_unlock(&vgpu->gvt->lock);

static int kvmgt_guest_init(struct mdev_device *mdev)
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;

	vgpu = mdev_get_drvdata(mdev);
	if (handle_valid(vgpu->handle))

	kvm = vgpu->vdev.kvm;
	if (!kvm || kvm->mm != current->mm) {
		gvt_vgpu_err("KVM is required to use Intel vGPU\n");

	if (__kvmgt_vgpu_exist(vgpu, kvm))

	info = vzalloc(sizeof(struct kvmgt_guest_info));

	vgpu->handle = (unsigned long)info;

	kvm_get_kvm(info->kvm);

	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

	mutex_init(&vgpu->dmabuf_lock);
	init_completion(&vgpu->vblank_done);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);

static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
	kvm_put_kvm(info->kvm);
	kvmgt_protect_table_destroy(info);
	gvt_cache_destroy(info->vgpu);

static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
	/* nothing to do here */

static void kvmgt_detach_vgpu(unsigned long handle)
	/* nothing to do here */

static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;

	if (!handle_valid(handle))

	info = (struct kvmgt_guest_info *)handle;

	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)

static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
	unsigned long iova, pfn;
	struct kvmgt_guest_info *info;

	struct intel_vgpu *vgpu;

	if (!handle_valid(handle))
		return INTEL_GVT_INVALID_ADDR;

	info = (struct kvmgt_guest_info *)handle;

	iova = gvt_cache_find(info->vgpu, gfn);
	if (iova != INTEL_GVT_INVALID_ADDR)

	pfn = INTEL_GVT_INVALID_ADDR;
	dev = mdev_dev(info->vgpu->vdev.mdev);
	rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);

		gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",

		return INTEL_GVT_INVALID_ADDR;
	/* map the pinned pfn to a host IOVA so the GPU can use it for DMA */
	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);

		gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
		vfio_unpin_pages(dev, &gfn, 1);
		return INTEL_GVT_INVALID_ADDR;

	gvt_cache_add(info->vgpu, gfn, iova);
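
/*
 * Descriptive note (not in the original source): kvmgt_gfn_to_pfn() above
 * first consults the per-vGPU rbtree cache; on a miss it pins the guest page
 * through vfio_pin_pages(), DMA-maps it to obtain a host IOVA, and caches the
 * gfn -> iova translation so later lookups avoid re-pinning.
 */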
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
		void *buf, unsigned long len, bool write)

	struct kvmgt_guest_info *info;

	bool kthread = current->mm == NULL;

	if (!handle_valid(handle))

	info = (struct kvmgt_guest_info *)handle;

	idx = srcu_read_lock(&kvm->srcu);
	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
		void *buf, unsigned long len)
	return kvmgt_rw_gpa(handle, gpa, buf, len, false);

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
		void *buf, unsigned long len)
	return kvmgt_rw_gpa(handle, gpa, buf, len, true);

static unsigned long kvmgt_virt_to_pfn(void *addr)
	return PFN_DOWN(__pa(addr));

static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
	struct kvmgt_guest_info *info;

	if (!handle_valid(handle))

	info = (struct kvmgt_guest_info *)handle;

	return kvm_is_visible_gfn(kvm, gfn);
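
/*
 * Descriptive note (not in the original source): kvmgt_mpt is the KVM
 * backend's implementation of the GVT "mediated pass-through" (MPT) hook
 * table; the GVT-g core calls through these hooks for host registration,
 * guest memory access, page write-protection, MSI injection and address
 * translation.
 */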
struct intel_gvt_mpt kvmgt_mpt = {
	.host_init = kvmgt_host_init,
	.host_exit = kvmgt_host_exit,
	.attach_vgpu = kvmgt_attach_vgpu,
	.detach_vgpu = kvmgt_detach_vgpu,
	.inject_msi = kvmgt_inject_msi,
	.from_virt_to_mfn = kvmgt_virt_to_pfn,
	.set_wp_page = kvmgt_write_protect_add,
	.unset_wp_page = kvmgt_write_protect_remove,
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
	.set_opregion = kvmgt_set_opregion,
	.get_vfio_device = kvmgt_get_vfio_device,
	.put_vfio_device = kvmgt_put_vfio_device,
	.is_valid_gfn = kvmgt_is_valid_gfn,

EXPORT_SYMBOL_GPL(kvmgt_mpt);

static int __init kvmgt_init(void)

static void __exit kvmgt_exit(void)

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");