drm/i915/kvmgt: fix an error code in gvt_dma_map_page()
drivers/gpu/drm/i915/gvt/kvmgt.c
1/*
2 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
3 *
4 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 * Authors:
26 * Kevin Tian <kevin.tian@intel.com>
27 * Jike Song <jike.song@intel.com>
28 * Xiaoguang Chen <xiaoguang.chen@intel.com>
29 */
30
31#include <linux/init.h>
32#include <linux/device.h>
33#include <linux/mm.h>
34#include <linux/mmu_context.h>
35#include <linux/types.h>
36#include <linux/list.h>
37#include <linux/rbtree.h>
38#include <linux/spinlock.h>
39#include <linux/eventfd.h>
40#include <linux/uuid.h>
41#include <linux/kvm_host.h>
42#include <linux/vfio.h>
43#include <linux/mdev.h>
44#include <linux/debugfs.h>
45
46#include "i915_drv.h"
47#include "gvt.h"
48
49static const struct intel_gvt_ops *intel_gvt_ops;
50
51/* helper macros copied from vfio-pci */
52#define VFIO_PCI_OFFSET_SHIFT 40
53#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
54#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
55#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
56
57#define OPREGION_SIGNATURE "IntelGraphicsMem"
58
59struct vfio_region;
60struct intel_vgpu_regops {
61 size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
62 size_t count, loff_t *ppos, bool iswrite);
63 void (*release)(struct intel_vgpu *vgpu,
64 struct vfio_region *region);
65};
66
67struct vfio_region {
68 u32 type;
69 u32 subtype;
70 size_t size;
71 u32 flags;
72 const struct intel_vgpu_regops *ops;
73 void *data;
74};
75
76struct kvmgt_pgfn {
77 gfn_t gfn;
78 struct hlist_node hnode;
79};
80
81struct kvmgt_guest_info {
82 struct kvm *kvm;
83 struct intel_vgpu *vgpu;
84 struct kvm_page_track_notifier_node track_node;
85#define NR_BKT (1 << 18)
86 struct hlist_head ptable[NR_BKT];
87#undef NR_BKT
88	struct dentry *debugfs_cache_entries;
89};
90
91struct gvt_dma {
92 struct intel_vgpu *vgpu;
93 struct rb_node gfn_node;
94 struct rb_node dma_addr_node;
95	gfn_t gfn;
96	dma_addr_t dma_addr;
97	unsigned long size;
98	struct kref ref;
99};
100
101static inline bool handle_valid(unsigned long handle)
102{
103 return !!(handle & ~0xff);
104}
105
106static int kvmgt_guest_init(struct mdev_device *mdev);
107static void intel_vgpu_release_work(struct work_struct *work);
108static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
109
110static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
111 unsigned long size)
112{
113 int total_pages;
114 int npage;
115 int ret;
116
117 total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
118
119 for (npage = 0; npage < total_pages; npage++) {
120 unsigned long cur_gfn = gfn + npage;
121
122 ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
123 WARN_ON(ret != 1);
124 }
125}
126
127/* Pin a normal or compound guest page for dma. */
128static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
129 unsigned long size, struct page **page)
130{
131 unsigned long base_pfn = 0;
132 int total_pages;
133 int npage;
134 int ret;
135
136 total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
137 /*
138	 * We pin the pages one-by-one to avoid allocating a big array
139 * on stack to hold pfns.
140 */
141 for (npage = 0; npage < total_pages; npage++) {
142 unsigned long cur_gfn = gfn + npage;
143 unsigned long pfn;
144
145 ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
146 IOMMU_READ | IOMMU_WRITE, &pfn);
147 if (ret != 1) {
148 gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
149 cur_gfn, ret);
150 goto err;
151 }
152
153 if (!pfn_valid(pfn)) {
154 gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
155 npage++;
156 ret = -EFAULT;
157 goto err;
158 }
159
160 if (npage == 0)
161 base_pfn = pfn;
162 else if (base_pfn + npage != pfn) {
163 gvt_vgpu_err("The pages are not continuous\n");
164 ret = -EINVAL;
165 npage++;
166 goto err;
167 }
168 }
169
170 *page = pfn_to_page(base_pfn);
171 return 0;
172err:
173 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
174 return ret;
175}
176
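/*
 * Pin the guest page(s) backing @gfn and create a DMA mapping of @size
 * bytes for device access. If dma_map_page() fails, the pages are
 * unpinned again and -ENOMEM is returned; returning a real error code
 * here (rather than the stale, successful value of ret) is the fix
 * referred to in the subject line.
 */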
177static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
178		dma_addr_t *dma_addr, unsigned long size)
179{
180	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
181	struct page *page = NULL;
182	int ret;
183
184 ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
185 if (ret)
186 return ret;
187
188	/* Setup DMA mapping. */
189	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
190	if (dma_mapping_error(dev, *dma_addr)) {
191 gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
192 page_to_pfn(page), ret);
193 gvt_unpin_guest_page(vgpu, gfn, size);
194		return -ENOMEM;
195	}
196
197	return 0;
198}
199
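/*
 * Undo gvt_dma_map_page(): release the DMA mapping and unpin the guest
 * pages backing the range.
 */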
200static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
201		dma_addr_t dma_addr, unsigned long size)
202{
203 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
204
205 dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
206 gvt_unpin_guest_page(vgpu, gfn, size);
207}
208
209static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
210 dma_addr_t dma_addr)
211{
212 struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
213 struct gvt_dma *itr;
214
215 while (node) {
216		itr = rb_entry(node, struct gvt_dma, dma_addr_node);
217
218		if (dma_addr < itr->dma_addr)
219			node = node->rb_left;
220		else if (dma_addr > itr->dma_addr)
221			node = node->rb_right;
222 else
223 return itr;
224	}
225	return NULL;
226}
227
228static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
229{
230 struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
231 struct gvt_dma *itr;
232
233 while (node) {
234 itr = rb_entry(node, struct gvt_dma, gfn_node);
235
236 if (gfn < itr->gfn)
237 node = node->rb_left;
238 else if (gfn > itr->gfn)
239 node = node->rb_right;
240 else
241 return itr;
242 }
243 return NULL;
244}
245
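/*
 * Record a new gfn <-> dma_addr translation. The entry is inserted into
 * both lookup trees (gfn_cache keyed by guest frame number,
 * dma_addr_cache keyed by DMA address) and starts with one kref
 * reference. The caller is expected to hold vdev.cache_lock.
 */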
246static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
247		dma_addr_t dma_addr, unsigned long size)
248{
249 struct gvt_dma *new, *itr;
250	struct rb_node **link, *parent = NULL;
251
252 new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
253 if (!new)
254		return -ENOMEM;
255
256	new->vgpu = vgpu;
257	new->gfn = gfn;
258	new->dma_addr = dma_addr;
259	new->size = size;
260	kref_init(&new->ref);
261
262 /* gfn_cache maps gfn to struct gvt_dma. */
263 link = &vgpu->vdev.gfn_cache.rb_node;
264 while (*link) {
265 parent = *link;
266		itr = rb_entry(parent, struct gvt_dma, gfn_node);
267
268		if (gfn < itr->gfn)
269 link = &parent->rb_left;
270 else
271 link = &parent->rb_right;
272 }
273 rb_link_node(&new->gfn_node, parent, link);
274 rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
275
276 /* dma_addr_cache maps dma addr to struct gvt_dma. */
277 parent = NULL;
278 link = &vgpu->vdev.dma_addr_cache.rb_node;
279 while (*link) {
280 parent = *link;
281 itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
282
283 if (dma_addr < itr->dma_addr)
284 link = &parent->rb_left;
285 else
286 link = &parent->rb_right;
287 }
288 rb_link_node(&new->dma_addr_node, parent, link);
289 rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
290
291 vgpu->vdev.nr_cache_entries++;
292	return 0;
293}
294
295static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
296 struct gvt_dma *entry)
297{
298 rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
299 rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
300	kfree(entry);
301	vgpu->vdev.nr_cache_entries--;
302}
303
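/*
 * Drop every cached translation: unmap, unpin and free each entry in the
 * gfn cache, taking cache_lock around each individual removal.
 */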
304static void gvt_cache_destroy(struct intel_vgpu *vgpu)
305{
306 struct gvt_dma *dma;
307 struct rb_node *node = NULL;
f30437c5 308
309 for (;;) {
310 mutex_lock(&vgpu->vdev.cache_lock);
311		node = rb_first(&vgpu->vdev.gfn_cache);
312 if (!node) {
313 mutex_unlock(&vgpu->vdev.cache_lock);
314 break;
315 }
316		dma = rb_entry(node, struct gvt_dma, gfn_node);
317		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
318		__gvt_cache_remove_entry(vgpu, dma);
319		mutex_unlock(&vgpu->vdev.cache_lock);
320	}
321}
322
323static void gvt_cache_init(struct intel_vgpu *vgpu)
324{
325 vgpu->vdev.gfn_cache = RB_ROOT;
326 vgpu->vdev.dma_addr_cache = RB_ROOT;
327	vgpu->vdev.nr_cache_entries = 0;
328 mutex_init(&vgpu->vdev.cache_lock);
329}
330
331static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
332{
333 hash_init(info->ptable);
334}
335
336static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
337{
338 struct kvmgt_pgfn *p;
339 struct hlist_node *tmp;
340 int i;
341
342 hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
343 hash_del(&p->hnode);
344 kfree(p);
345 }
346}
347
348static struct kvmgt_pgfn *
349__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
350{
351 struct kvmgt_pgfn *p, *res = NULL;
352
353 hash_for_each_possible(info->ptable, p, hnode, gfn) {
354 if (gfn == p->gfn) {
355 res = p;
356 break;
357 }
358 }
359
360 return res;
361}
362
363static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
364 gfn_t gfn)
365{
366 struct kvmgt_pgfn *p;
367
368 p = __kvmgt_protect_table_find(info, gfn);
369 return !!p;
370}
371
372static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
373{
374 struct kvmgt_pgfn *p;
375
376 if (kvmgt_gfn_is_write_protected(info, gfn))
377 return;
378
379	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
380 if (WARN(!p, "gfn: 0x%llx\n", gfn))
381 return;
382
383 p->gfn = gfn;
384 hash_add(info->ptable, &p->hnode, gfn);
385}
386
387static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
388 gfn_t gfn)
389{
390 struct kvmgt_pgfn *p;
391
392 p = __kvmgt_protect_table_find(info, gfn);
393 if (p) {
394 hash_del(&p->hnode);
395 kfree(p);
396 }
397}
398
399static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
400 size_t count, loff_t *ppos, bool iswrite)
401{
402 unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
403 VFIO_PCI_NUM_REGIONS;
404 void *base = vgpu->vdev.region[i].data;
405 loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
406
407 if (pos >= vgpu->vdev.region[i].size || iswrite) {
408 gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
409 return -EINVAL;
410 }
411 count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
412 memcpy(buf, base + pos, count);
413
414 return count;
415}
416
417static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
418 struct vfio_region *region)
419{
420}
421
422static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
423 .rw = intel_vgpu_reg_rw_opregion,
424 .release = intel_vgpu_reg_release_opregion,
425};
426
427static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
428 unsigned int type, unsigned int subtype,
429 const struct intel_vgpu_regops *ops,
430 size_t size, u32 flags, void *data)
431{
432 struct vfio_region *region;
433
434 region = krealloc(vgpu->vdev.region,
435 (vgpu->vdev.num_regions + 1) * sizeof(*region),
436 GFP_KERNEL);
437 if (!region)
438 return -ENOMEM;
439
440 vgpu->vdev.region = region;
441 vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
442 vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
443 vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
444 vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
445 vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
446 vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
447 vgpu->vdev.num_regions++;
448 return 0;
449}
450
451static int kvmgt_get_vfio_device(void *p_vgpu)
452{
453 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
454
455 vgpu->vdev.vfio_device = vfio_device_get_from_dev(
456 mdev_dev(vgpu->vdev.mdev));
457 if (!vgpu->vdev.vfio_device) {
458 gvt_vgpu_err("failed to get vfio device\n");
459 return -ENODEV;
460 }
461 return 0;
462}
463
464
465static int kvmgt_set_opregion(void *p_vgpu)
466{
467 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
468 void *base;
469 int ret;
470
471	/* Each vgpu has its own opregion, although VFIO will create another
472	 * one later. This one is used to expose the opregion to VFIO, while
473	 * the one created by VFIO later is what the guest actually uses.
474	 */
475 base = vgpu_opregion(vgpu)->va;
476 if (!base)
477 return -ENOMEM;
478
479 if (memcmp(base, OPREGION_SIGNATURE, 16)) {
480 memunmap(base);
481 return -EINVAL;
482 }
483
484 ret = intel_vgpu_register_reg(vgpu,
485 PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
486 VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
487 &intel_vgpu_regops_opregion, OPREGION_SIZE,
488 VFIO_REGION_INFO_FLAG_READ, base);
489
490 return ret;
491}
492
493static void kvmgt_put_vfio_device(void *vgpu)
494{
495 if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
496 return;
497
498 vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
499}
500
501static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
502{
503	struct intel_vgpu *vgpu = NULL;
504 struct intel_vgpu_type *type;
505 struct device *pdev;
506 void *gvt;
507	int ret;
508
509	pdev = mdev_parent_dev(mdev);
510 gvt = kdev_to_i915(pdev)->gvt;
511
512	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
513	if (!type) {
514		gvt_vgpu_err("failed to find type %s to create\n",
515			     kobject_name(kobj));
516 ret = -EINVAL;
517 goto out;
518 }
519
520 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
521 if (IS_ERR_OR_NULL(vgpu)) {
522		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
523		gvt_err("failed to create intel vgpu: %d\n", ret);
524		goto out;
525 }
526
527 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
528
529 vgpu->vdev.mdev = mdev;
530 mdev_set_drvdata(mdev, vgpu);
531
532 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
533		     dev_name(mdev_dev(mdev)));
534 ret = 0;
535
536out:
537 return ret;
538}
539
540static int intel_vgpu_remove(struct mdev_device *mdev)
541{
542 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
543
544 if (handle_valid(vgpu->handle))
545 return -EBUSY;
546
547 intel_gvt_ops->vgpu_destroy(vgpu);
548 return 0;
549}
550
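/*
 * VFIO IOMMU notifier. On VFIO_IOMMU_NOTIFY_DMA_UNMAP, walk the
 * unmapped IOVA range and tear down any cached translations that fall
 * inside it, unpinning the pages and releasing their DMA mappings.
 */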
551static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
552 unsigned long action, void *data)
553{
554 struct intel_vgpu *vgpu = container_of(nb,
555 struct intel_vgpu,
556 vdev.iommu_notifier);
557
558 if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
559 struct vfio_iommu_type1_dma_unmap *unmap = data;
560 struct gvt_dma *entry;
561 unsigned long iov_pfn, end_iov_pfn;
562
563 iov_pfn = unmap->iova >> PAGE_SHIFT;
564 end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
565
566 mutex_lock(&vgpu->vdev.cache_lock);
567 for (; iov_pfn < end_iov_pfn; iov_pfn++) {
568 entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
569 if (!entry)
570 continue;
571
572 gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
573 entry->size);
574 __gvt_cache_remove_entry(vgpu, entry);
575 }
576 mutex_unlock(&vgpu->vdev.cache_lock);
577 }
578
579 return NOTIFY_OK;
580}
581
582static int intel_vgpu_group_notifier(struct notifier_block *nb,
583 unsigned long action, void *data)
584{
585 struct intel_vgpu *vgpu = container_of(nb,
586 struct intel_vgpu,
587 vdev.group_notifier);
588
589 /* the only action we care about */
590 if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
591 vgpu->vdev.kvm = data;
592
593 if (!data)
594 schedule_work(&vgpu->vdev.release_work);
595 }
596
597 return NOTIFY_OK;
598}
599
600static int intel_vgpu_open(struct mdev_device *mdev)
601{
602 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
603 unsigned long events;
604 int ret;
605
606 vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
607 vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
608
609 events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
610	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
611 &vgpu->vdev.iommu_notifier);
612 if (ret != 0) {
613 gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
614 ret);
615 goto out;
616 }
617
618 events = VFIO_GROUP_NOTIFY_SET_KVM;
619	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
620 &vgpu->vdev.group_notifier);
621 if (ret != 0) {
622 gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
623 ret);
624 goto undo_iommu;
625 }
626
627 ret = kvmgt_guest_init(mdev);
628 if (ret)
629 goto undo_group;
630
631 intel_gvt_ops->vgpu_activate(vgpu);
632
633 atomic_set(&vgpu->vdev.released, 0);
634 return ret;
635
636undo_group:
637	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
638				 &vgpu->vdev.group_notifier);
639
640undo_iommu:
641	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
642 &vgpu->vdev.iommu_notifier);
643out:
644 return ret;
645}
646
647static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
648{
649 struct eventfd_ctx *trigger;
650
651 trigger = vgpu->vdev.msi_trigger;
652 if (trigger) {
653 eventfd_ctx_put(trigger);
654 vgpu->vdev.msi_trigger = NULL;
655 }
656}
657
658static void __intel_vgpu_release(struct intel_vgpu *vgpu)
659{
660 struct kvmgt_guest_info *info;
661	int ret;
662
663 if (!handle_valid(vgpu->handle))
664 return;
665
666 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
667 return;
668
669 intel_gvt_ops->vgpu_deactivate(vgpu);
670
671	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
672					&vgpu->vdev.iommu_notifier);
673 WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
674
675	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
676					&vgpu->vdev.group_notifier);
677	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
678
679 info = (struct kvmgt_guest_info *)vgpu->handle;
680 kvmgt_guest_exit(info);
681
682 intel_vgpu_release_msi_eventfd_ctx(vgpu);
683
684	vgpu->vdev.kvm = NULL;
685 vgpu->handle = 0;
686}
687
688static void intel_vgpu_release(struct mdev_device *mdev)
689{
690 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
691
692 __intel_vgpu_release(vgpu);
693}
694
695static void intel_vgpu_release_work(struct work_struct *work)
696{
697 struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
698 vdev.release_work);
699
700 __intel_vgpu_release(vgpu);
701}
702
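/*
 * Read the guest-programmed base address of @bar from the vGPU's
 * virtual config space, folding in the upper dword for 64-bit memory
 * BARs.
 */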
703static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
704{
705 u32 start_lo, start_hi;
706 u32 mem_type;
707
708	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
709			PCI_BASE_ADDRESS_MEM_MASK;
710	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
711 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
712
713 switch (mem_type) {
714 case PCI_BASE_ADDRESS_MEM_TYPE_64:
715 start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
716				+ bar + 4));
717 break;
718 case PCI_BASE_ADDRESS_MEM_TYPE_32:
719 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
720 /* 1M mem BAR treated as 32-bit BAR */
721 default:
722 /* mem unknown type treated as 32-bit BAR */
723 start_hi = 0;
724 break;
725 }
726
727 return ((u64)start_hi << 32) | start_lo;
728}
729
730static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
731 void *buf, unsigned int count, bool is_write)
732{
733 uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
734 int ret;
735
736 if (is_write)
737 ret = intel_gvt_ops->emulate_mmio_write(vgpu,
738 bar_start + off, buf, count);
739 else
740 ret = intel_gvt_ops->emulate_mmio_read(vgpu,
741 bar_start + off, buf, count);
742 return ret;
743}
744
745static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
746{
747 return off >= vgpu_aperture_offset(vgpu) &&
748 off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
749}
750
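/*
 * Handle direct reads/writes of the vGPU aperture (BAR2): the offset is
 * bounds-checked against the vGPU's aperture window and the access goes
 * through a temporary WC mapping of the GGTT aperture.
 */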
751static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
752 void *buf, unsigned long count, bool is_write)
753{
754 void *aperture_va;
755
756 if (!intel_vgpu_in_aperture(vgpu, off) ||
757 !intel_vgpu_in_aperture(vgpu, off + count)) {
758 gvt_vgpu_err("Invalid aperture offset %llu\n", off);
759 return -EINVAL;
760 }
761
762 aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
763 ALIGN_DOWN(off, PAGE_SIZE),
764 count + offset_in_page(off));
765 if (!aperture_va)
766 return -EIO;
767
768 if (is_write)
769 memcpy(aperture_va + offset_in_page(off), buf, count);
770 else
771 memcpy(buf, aperture_va + offset_in_page(off), count);
772
773 io_mapping_unmap(aperture_va);
774
775 return 0;
776}
777
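/*
 * Common read/write path: decode the VFIO region index from the file
 * offset and dispatch to config space emulation, BAR0 MMIO emulation,
 * the BAR2 aperture, or a device-specific region handler.
 */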
778static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
779 size_t count, loff_t *ppos, bool is_write)
780{
781 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
782 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
783 uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
784 int ret = -EINVAL;
785
786
787	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
788		gvt_vgpu_err("invalid index: %u\n", index);
789 return -EINVAL;
790 }
791
792 switch (index) {
793 case VFIO_PCI_CONFIG_REGION_INDEX:
794 if (is_write)
795 ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
796 buf, count);
797 else
798 ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
799 buf, count);
800 break;
801 case VFIO_PCI_BAR0_REGION_INDEX:
802 ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
803 buf, count, is_write);
804 break;
805 case VFIO_PCI_BAR2_REGION_INDEX:
806		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
807 break;
808 case VFIO_PCI_BAR1_REGION_INDEX:
809 case VFIO_PCI_BAR3_REGION_INDEX:
810 case VFIO_PCI_BAR4_REGION_INDEX:
811 case VFIO_PCI_BAR5_REGION_INDEX:
812 case VFIO_PCI_VGA_REGION_INDEX:
813 case VFIO_PCI_ROM_REGION_INDEX:
814		break;
815	default:
816 if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
817 return -EINVAL;
818
819 index -= VFIO_PCI_NUM_REGIONS;
820 return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
821 ppos, is_write);
822 }
823
824 return ret == 0 ? count : ret;
825}
826
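/*
 * Return true if the access at *ppos falls inside the GGTT entry range
 * of the MMIO BAR; such accesses are allowed as single 8-byte
 * transactions by intel_vgpu_read()/intel_vgpu_write().
 */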
827static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
828{
829 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
830 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
831 struct intel_gvt *gvt = vgpu->gvt;
832 int offset;
833
834 /* Only allow MMIO GGTT entry access */
835 if (index != PCI_BASE_ADDRESS_0)
836 return false;
837
838 offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
839 intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
840
841 return (offset >= gvt->device_info.gtt_start_offset &&
842 offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
843 true : false;
844}
845
846static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
847 size_t count, loff_t *ppos)
848{
849 unsigned int done = 0;
850 int ret;
851
852 while (count) {
853 size_t filled;
854
855 /* Only support GGTT entry 8 bytes read */
856 if (count >= 8 && !(*ppos % 8) &&
857 gtt_entry(mdev, ppos)) {
858 u64 val;
859
860 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
861 ppos, false);
862 if (ret <= 0)
863 goto read_err;
864
865 if (copy_to_user(buf, &val, sizeof(val)))
866 goto read_err;
867
868 filled = 8;
869 } else if (count >= 4 && !(*ppos % 4)) {
870 u32 val;
871
872 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
873 ppos, false);
874 if (ret <= 0)
875 goto read_err;
876
877 if (copy_to_user(buf, &val, sizeof(val)))
878 goto read_err;
879
880 filled = 4;
881 } else if (count >= 2 && !(*ppos % 2)) {
882 u16 val;
883
884 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
885 ppos, false);
886 if (ret <= 0)
887 goto read_err;
888
889 if (copy_to_user(buf, &val, sizeof(val)))
890 goto read_err;
891
892 filled = 2;
893 } else {
894 u8 val;
895
896 ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
897 false);
898 if (ret <= 0)
899 goto read_err;
900
901 if (copy_to_user(buf, &val, sizeof(val)))
902 goto read_err;
903
904 filled = 1;
905 }
906
907 count -= filled;
908 done += filled;
909 *ppos += filled;
910 buf += filled;
911 }
912
913 return done;
914
915read_err:
916 return -EFAULT;
917}
918
919static ssize_t intel_vgpu_write(struct mdev_device *mdev,
920 const char __user *buf,
921 size_t count, loff_t *ppos)
922{
923 unsigned int done = 0;
924 int ret;
925
926 while (count) {
927 size_t filled;
928
929 /* Only support GGTT entry 8 bytes write */
930 if (count >= 8 && !(*ppos % 8) &&
931 gtt_entry(mdev, ppos)) {
932 u64 val;
933
934 if (copy_from_user(&val, buf, sizeof(val)))
935 goto write_err;
936
937 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
938 ppos, true);
939 if (ret <= 0)
940 goto write_err;
941
942 filled = 8;
943 } else if (count >= 4 && !(*ppos % 4)) {
944 u32 val;
945
946 if (copy_from_user(&val, buf, sizeof(val)))
947 goto write_err;
948
949 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
950 ppos, true);
951 if (ret <= 0)
952 goto write_err;
953
954 filled = 4;
955 } else if (count >= 2 && !(*ppos % 2)) {
956 u16 val;
957
958 if (copy_from_user(&val, buf, sizeof(val)))
959 goto write_err;
960
961 ret = intel_vgpu_rw(mdev, (char *)&val,
962 sizeof(val), ppos, true);
963 if (ret <= 0)
964 goto write_err;
965
966 filled = 2;
967 } else {
968 u8 val;
969
970 if (copy_from_user(&val, buf, sizeof(val)))
971 goto write_err;
972
973 ret = intel_vgpu_rw(mdev, &val, sizeof(val),
974 ppos, true);
975 if (ret <= 0)
976 goto write_err;
977
978 filled = 1;
979 }
980
981 count -= filled;
982 done += filled;
983 *ppos += filled;
984 buf += filled;
985 }
986
987 return done;
988write_err:
989 return -EFAULT;
990}
991
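/*
 * mmap handler: only the BAR2 (aperture) region may be mapped, and it
 * is remapped directly onto this vGPU's slice of the physical aperture.
 */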
992static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
993{
994 unsigned int index;
995 u64 virtaddr;
996 unsigned long req_size, pgoff = 0;
997 pgprot_t pg_prot;
998 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
999
1000 index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
1001 if (index >= VFIO_PCI_ROM_REGION_INDEX)
1002 return -EINVAL;
1003
1004 if (vma->vm_end < vma->vm_start)
1005 return -EINVAL;
1006 if ((vma->vm_flags & VM_SHARED) == 0)
1007 return -EINVAL;
1008 if (index != VFIO_PCI_BAR2_REGION_INDEX)
1009 return -EINVAL;
1010
1011 pg_prot = vma->vm_page_prot;
1012 virtaddr = vma->vm_start;
1013 req_size = vma->vm_end - vma->vm_start;
1014 pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
1015
1016 return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
1017}
1018
1019static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
1020{
1021 if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
1022 return 1;
1023
1024 return 0;
1025}
1026
1027static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
1028 unsigned int index, unsigned int start,
1029 unsigned int count, uint32_t flags,
1030 void *data)
1031{
1032 return 0;
1033}
1034
1035static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
1036 unsigned int index, unsigned int start,
1037 unsigned int count, uint32_t flags, void *data)
1038{
1039 return 0;
1040}
1041
1042static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
1043 unsigned int index, unsigned int start, unsigned int count,
1044 uint32_t flags, void *data)
1045{
1046 return 0;
1047}
1048
1049static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
1050 unsigned int index, unsigned int start, unsigned int count,
1051 uint32_t flags, void *data)
1052{
1053 struct eventfd_ctx *trigger;
1054
1055 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
1056 int fd = *(int *)data;
1057
1058 trigger = eventfd_ctx_fdget(fd);
1059 if (IS_ERR(trigger)) {
1060			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
1061 return PTR_ERR(trigger);
1062 }
1063 vgpu->vdev.msi_trigger = trigger;
1064 } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
1065 intel_vgpu_release_msi_eventfd_ctx(vgpu);
1066
1067 return 0;
1068}
1069
1070static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
1071 unsigned int index, unsigned int start, unsigned int count,
1072 void *data)
1073{
1074 int (*func)(struct intel_vgpu *vgpu, unsigned int index,
1075 unsigned int start, unsigned int count, uint32_t flags,
1076 void *data) = NULL;
1077
1078 switch (index) {
1079 case VFIO_PCI_INTX_IRQ_INDEX:
1080 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1081 case VFIO_IRQ_SET_ACTION_MASK:
1082 func = intel_vgpu_set_intx_mask;
1083 break;
1084 case VFIO_IRQ_SET_ACTION_UNMASK:
1085 func = intel_vgpu_set_intx_unmask;
1086 break;
1087 case VFIO_IRQ_SET_ACTION_TRIGGER:
1088 func = intel_vgpu_set_intx_trigger;
1089 break;
1090 }
1091 break;
1092 case VFIO_PCI_MSI_IRQ_INDEX:
1093 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1094 case VFIO_IRQ_SET_ACTION_MASK:
1095 case VFIO_IRQ_SET_ACTION_UNMASK:
1096 /* XXX Need masking support exported */
1097 break;
1098 case VFIO_IRQ_SET_ACTION_TRIGGER:
1099 func = intel_vgpu_set_msi_trigger;
1100 break;
1101 }
1102 break;
1103 }
1104
1105 if (!func)
1106 return -ENOTTY;
1107
1108 return func(vgpu, index, start, count, flags, data);
1109}
1110
1111static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1112 unsigned long arg)
1113{
1114 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1115 unsigned long minsz;
1116
1117 gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
1118
1119 if (cmd == VFIO_DEVICE_GET_INFO) {
1120 struct vfio_device_info info;
1121
1122 minsz = offsetofend(struct vfio_device_info, num_irqs);
1123
1124 if (copy_from_user(&info, (void __user *)arg, minsz))
1125 return -EFAULT;
1126
1127 if (info.argsz < minsz)
1128 return -EINVAL;
1129
1130 info.flags = VFIO_DEVICE_FLAGS_PCI;
1131 info.flags |= VFIO_DEVICE_FLAGS_RESET;
1132 info.num_regions = VFIO_PCI_NUM_REGIONS +
1133 vgpu->vdev.num_regions;
1134 info.num_irqs = VFIO_PCI_NUM_IRQS;
1135
1136 return copy_to_user((void __user *)arg, &info, minsz) ?
1137 -EFAULT : 0;
1138
1139 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1140 struct vfio_region_info info;
1141 struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
1142 int i, ret;
1143 struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
1144 size_t size;
1145 int nr_areas = 1;
1146 int cap_type_id;
1147
1148 minsz = offsetofend(struct vfio_region_info, offset);
1149
1150 if (copy_from_user(&info, (void __user *)arg, minsz))
1151 return -EFAULT;
1152
1153 if (info.argsz < minsz)
1154 return -EINVAL;
1155
1156 switch (info.index) {
1157 case VFIO_PCI_CONFIG_REGION_INDEX:
1158 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1159			info.size = vgpu->gvt->device_info.cfg_space_size;
1160 info.flags = VFIO_REGION_INFO_FLAG_READ |
1161 VFIO_REGION_INFO_FLAG_WRITE;
1162 break;
1163 case VFIO_PCI_BAR0_REGION_INDEX:
1164 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1165 info.size = vgpu->cfg_space.bar[info.index].size;
1166 if (!info.size) {
1167 info.flags = 0;
1168 break;
1169 }
1170
1171 info.flags = VFIO_REGION_INFO_FLAG_READ |
1172 VFIO_REGION_INFO_FLAG_WRITE;
1173 break;
1174 case VFIO_PCI_BAR1_REGION_INDEX:
1175 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1176 info.size = 0;
1177 info.flags = 0;
1178 break;
1179 case VFIO_PCI_BAR2_REGION_INDEX:
1180 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1181 info.flags = VFIO_REGION_INFO_FLAG_CAPS |
1182 VFIO_REGION_INFO_FLAG_MMAP |
1183 VFIO_REGION_INFO_FLAG_READ |
1184 VFIO_REGION_INFO_FLAG_WRITE;
1185 info.size = gvt_aperture_sz(vgpu->gvt);
1186
1187 size = sizeof(*sparse) +
1188 (nr_areas * sizeof(*sparse->areas));
1189 sparse = kzalloc(size, GFP_KERNEL);
1190 if (!sparse)
1191 return -ENOMEM;
1192
1193 sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1194 sparse->header.version = 1;
1195 sparse->nr_areas = nr_areas;
1196 cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1197 sparse->areas[0].offset =
1198 PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1199 sparse->areas[0].size = vgpu_aperture_sz(vgpu);
1200 break;
1201
1202 case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
1203 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1204 info.size = 0;
1205			info.flags = 0;
1206
1207 gvt_dbg_core("get region info bar:%d\n", info.index);
1208 break;
1209
1210 case VFIO_PCI_ROM_REGION_INDEX:
1211 case VFIO_PCI_VGA_REGION_INDEX:
1212 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1213 info.size = 0;
1214 info.flags = 0;
1215
1216 gvt_dbg_core("get region info index:%d\n", info.index);
1217 break;
1218 default:
1219 {
1220 struct vfio_region_info_cap_type cap_type = {
1221 .header.id = VFIO_REGION_INFO_CAP_TYPE,
1222 .header.version = 1 };
1223
1224 if (info.index >= VFIO_PCI_NUM_REGIONS +
1225 vgpu->vdev.num_regions)
1226 return -EINVAL;
1227
1228 i = info.index - VFIO_PCI_NUM_REGIONS;
1229
1230 info.offset =
1231 VFIO_PCI_INDEX_TO_OFFSET(info.index);
1232 info.size = vgpu->vdev.region[i].size;
1233 info.flags = vgpu->vdev.region[i].flags;
1234
1235 cap_type.type = vgpu->vdev.region[i].type;
1236 cap_type.subtype = vgpu->vdev.region[i].subtype;
1237
1238 ret = vfio_info_add_capability(&caps,
1239 &cap_type.header,
1240 sizeof(cap_type));
1241 if (ret)
1242 return ret;
1243 }
1244 }
1245
1246 if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
1247 switch (cap_type_id) {
1248 case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
1249 ret = vfio_info_add_capability(&caps,
1250 &sparse->header, sizeof(*sparse) +
1251 (sparse->nr_areas *
1252 sizeof(*sparse->areas)));
1253 kfree(sparse);
1254 if (ret)
1255 return ret;
1256 break;
1257 default:
1258 return -EINVAL;
1259 }
1260 }
1261
1262 if (caps.size) {
1263		info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
1264 if (info.argsz < sizeof(info) + caps.size) {
1265 info.argsz = sizeof(info) + caps.size;
1266 info.cap_offset = 0;
1267 } else {
1268 vfio_info_cap_shift(&caps, sizeof(info));
1269 if (copy_to_user((void __user *)arg +
1270 sizeof(info), caps.buf,
1271 caps.size)) {
1272 kfree(caps.buf);
1273 return -EFAULT;
1274 }
1275 info.cap_offset = sizeof(info);
1276 }
1277
1278 kfree(caps.buf);
1279 }
1280
1281 return copy_to_user((void __user *)arg, &info, minsz) ?
1282 -EFAULT : 0;
1283 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1284 struct vfio_irq_info info;
1285
1286 minsz = offsetofend(struct vfio_irq_info, count);
1287
1288 if (copy_from_user(&info, (void __user *)arg, minsz))
1289 return -EFAULT;
1290
1291 if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1292 return -EINVAL;
1293
1294 switch (info.index) {
1295 case VFIO_PCI_INTX_IRQ_INDEX:
1296 case VFIO_PCI_MSI_IRQ_INDEX:
1297 break;
1298 default:
1299 return -EINVAL;
1300 }
1301
1302 info.flags = VFIO_IRQ_INFO_EVENTFD;
1303
1304 info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1305
1306 if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1307 info.flags |= (VFIO_IRQ_INFO_MASKABLE |
1308 VFIO_IRQ_INFO_AUTOMASKED);
1309 else
1310 info.flags |= VFIO_IRQ_INFO_NORESIZE;
1311
1312 return copy_to_user((void __user *)arg, &info, minsz) ?
1313 -EFAULT : 0;
1314 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
1315 struct vfio_irq_set hdr;
1316 u8 *data = NULL;
1317 int ret = 0;
1318 size_t data_size = 0;
1319
1320 minsz = offsetofend(struct vfio_irq_set, count);
1321
1322 if (copy_from_user(&hdr, (void __user *)arg, minsz))
1323 return -EFAULT;
1324
1325 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
1326 int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1327
1328 ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1329 VFIO_PCI_NUM_IRQS, &data_size);
1330 if (ret) {
1331			gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1332 return -EINVAL;
1333 }
1334 if (data_size) {
1335 data = memdup_user((void __user *)(arg + minsz),
1336 data_size);
1337 if (IS_ERR(data))
1338 return PTR_ERR(data);
1339 }
1340 }
1341
1342 ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1343 hdr.start, hdr.count, data);
1344 kfree(data);
1345
1346 return ret;
1347 } else if (cmd == VFIO_DEVICE_RESET) {
1348 intel_gvt_ops->vgpu_reset(vgpu);
1349 return 0;
1350 } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
1351 struct vfio_device_gfx_plane_info dmabuf;
1352 int ret = 0;
1353
1354 minsz = offsetofend(struct vfio_device_gfx_plane_info,
1355 dmabuf_id);
1356 if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
1357 return -EFAULT;
1358 if (dmabuf.argsz < minsz)
1359 return -EINVAL;
1360
1361 ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
1362 if (ret != 0)
1363 return ret;
1364
1365 return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
1366 -EFAULT : 0;
1367 } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
1368 __u32 dmabuf_id;
1369 __s32 dmabuf_fd;
1370
1371 if (get_user(dmabuf_id, (__u32 __user *)arg))
1372 return -EFAULT;
1373
1374 dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
1375 return dmabuf_fd;
1376
1377 }
1378
1379	return -ENOTTY;
1380}
1381
1382static ssize_t
1383vgpu_id_show(struct device *dev, struct device_attribute *attr,
1384 char *buf)
1385{
1386 struct mdev_device *mdev = mdev_from_dev(dev);
1387
1388 if (mdev) {
1389 struct intel_vgpu *vgpu = (struct intel_vgpu *)
1390 mdev_get_drvdata(mdev);
1391 return sprintf(buf, "%d\n", vgpu->id);
1392 }
1393 return sprintf(buf, "\n");
1394}
1395
1396static ssize_t
1397hw_id_show(struct device *dev, struct device_attribute *attr,
1398 char *buf)
1399{
1400 struct mdev_device *mdev = mdev_from_dev(dev);
1401
1402 if (mdev) {
1403 struct intel_vgpu *vgpu = (struct intel_vgpu *)
1404 mdev_get_drvdata(mdev);
1405 return sprintf(buf, "%u\n",
1406			vgpu->submission.shadow_ctx->hw_id);
1407 }
1408 return sprintf(buf, "\n");
1409}
1410
1411static DEVICE_ATTR_RO(vgpu_id);
1412static DEVICE_ATTR_RO(hw_id);
1413
1414static struct attribute *intel_vgpu_attrs[] = {
1415 &dev_attr_vgpu_id.attr,
1416	&dev_attr_hw_id.attr,
1417 NULL
1418};
1419
1420static const struct attribute_group intel_vgpu_group = {
1421 .name = "intel_vgpu",
1422 .attrs = intel_vgpu_attrs,
1423};
1424
1425static const struct attribute_group *intel_vgpu_groups[] = {
1426 &intel_vgpu_group,
1427 NULL,
1428};
1429
1430static struct mdev_parent_ops intel_vgpu_ops = {
1431	.mdev_attr_groups = intel_vgpu_groups,
1432 .create = intel_vgpu_create,
1433 .remove = intel_vgpu_remove,
1434
1435 .open = intel_vgpu_open,
1436 .release = intel_vgpu_release,
1437
1438 .read = intel_vgpu_read,
1439 .write = intel_vgpu_write,
1440 .mmap = intel_vgpu_mmap,
1441 .ioctl = intel_vgpu_ioctl,
1442};
1443
1444static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
1445{
1446	struct attribute **kvm_type_attrs;
1447 struct attribute_group **kvm_vgpu_type_groups;
1448
1449 intel_gvt_ops = ops;
1450	if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
1451 &kvm_vgpu_type_groups))
1452 return -EFAULT;
1453 intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
1454
1455	return mdev_register_device(dev, &intel_vgpu_ops);
1456}
1457
1458static void kvmgt_host_exit(struct device *dev, void *gvt)
1459{
1460	mdev_unregister_device(dev);
1461}
1462
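/*
 * Start write-protecting a guest page: mark it with the KVM page-track
 * framework and remember the gfn in the per-guest protect table so that
 * guest writes are forwarded to the GVT write-protect handler.
 */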
1463static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
1464{
1465 struct kvmgt_guest_info *info;
1466 struct kvm *kvm;
1467 struct kvm_memory_slot *slot;
1468 int idx;
1469
1470 if (!handle_valid(handle))
1471 return -ESRCH;
1472
1473 info = (struct kvmgt_guest_info *)handle;
1474 kvm = info->kvm;
1475
1476 idx = srcu_read_lock(&kvm->srcu);
1477 slot = gfn_to_memslot(kvm, gfn);
1478 if (!slot) {
1479 srcu_read_unlock(&kvm->srcu, idx);
1480 return -EINVAL;
1481 }
1482
1483 spin_lock(&kvm->mmu_lock);
1484
1485 if (kvmgt_gfn_is_write_protected(info, gfn))
1486 goto out;
1487
1488 kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1489 kvmgt_protect_table_add(info, gfn);
1490
1491out:
1492 spin_unlock(&kvm->mmu_lock);
1493 srcu_read_unlock(&kvm->srcu, idx);
1494 return 0;
1495}
1496
1497static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
1498{
1499 struct kvmgt_guest_info *info;
1500 struct kvm *kvm;
1501 struct kvm_memory_slot *slot;
1502 int idx;
1503
1504 if (!handle_valid(handle))
1505 return 0;
1506
1507 info = (struct kvmgt_guest_info *)handle;
1508 kvm = info->kvm;
1509
1510 idx = srcu_read_lock(&kvm->srcu);
1511 slot = gfn_to_memslot(kvm, gfn);
1512 if (!slot) {
1513 srcu_read_unlock(&kvm->srcu, idx);
1514 return -EINVAL;
1515 }
1516
1517 spin_lock(&kvm->mmu_lock);
1518
1519 if (!kvmgt_gfn_is_write_protected(info, gfn))
1520 goto out;
1521
1522 kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1523 kvmgt_protect_table_del(info, gfn);
1524
1525out:
1526 spin_unlock(&kvm->mmu_lock);
1527 srcu_read_unlock(&kvm->srcu, idx);
1528 return 0;
1529}
1530
1531static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1532 const u8 *val, int len,
1533 struct kvm_page_track_notifier_node *node)
1534{
1535 struct kvmgt_guest_info *info = container_of(node,
1536 struct kvmgt_guest_info, track_node);
1537
1538 if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
1539 intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
1540 (void *)val, len);
1541}
1542
1543static void kvmgt_page_track_flush_slot(struct kvm *kvm,
1544 struct kvm_memory_slot *slot,
1545 struct kvm_page_track_notifier_node *node)
1546{
1547 int i;
1548 gfn_t gfn;
1549 struct kvmgt_guest_info *info = container_of(node,
1550 struct kvmgt_guest_info, track_node);
1551
1552 spin_lock(&kvm->mmu_lock);
1553 for (i = 0; i < slot->npages; i++) {
1554 gfn = slot->base_gfn + i;
1555 if (kvmgt_gfn_is_write_protected(info, gfn)) {
1556 kvm_slot_page_track_remove_page(kvm, slot, gfn,
1557 KVM_PAGE_TRACK_WRITE);
1558 kvmgt_protect_table_del(info, gfn);
1559 }
1560 }
1561 spin_unlock(&kvm->mmu_lock);
1562}
1563
1564static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
1565{
1566 struct intel_vgpu *itr;
1567 struct kvmgt_guest_info *info;
1568 int id;
1569 bool ret = false;
1570
1571 mutex_lock(&vgpu->gvt->lock);
1572 for_each_active_vgpu(vgpu->gvt, itr, id) {
1573 if (!handle_valid(itr->handle))
1574 continue;
1575
1576 info = (struct kvmgt_guest_info *)itr->handle;
1577 if (kvm && kvm == info->kvm) {
1578 ret = true;
1579 goto out;
1580 }
1581 }
1582out:
1583 mutex_unlock(&vgpu->gvt->lock);
1584 return ret;
1585}
1586
1587static int kvmgt_guest_init(struct mdev_device *mdev)
1588{
1589 struct kvmgt_guest_info *info;
1590 struct intel_vgpu *vgpu;
1591 struct kvm *kvm;
1592
1593 vgpu = mdev_get_drvdata(mdev);
1594 if (handle_valid(vgpu->handle))
1595 return -EEXIST;
1596
1597 kvm = vgpu->vdev.kvm;
1598 if (!kvm || kvm->mm != current->mm) {
1599		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1600 return -ESRCH;
1601 }
1602
1603 if (__kvmgt_vgpu_exist(vgpu, kvm))
1604 return -EEXIST;
1605
1606 info = vzalloc(sizeof(struct kvmgt_guest_info));
1607 if (!info)
1608 return -ENOMEM;
1609
1610 vgpu->handle = (unsigned long)info;
1611 info->vgpu = vgpu;
1612 info->kvm = kvm;
1613	kvm_get_kvm(info->kvm);
1614
1615 kvmgt_protect_table_init(info);
1616 gvt_cache_init(vgpu);
1617
1618 mutex_init(&vgpu->dmabuf_lock);
1619 init_completion(&vgpu->vblank_done);
1620
1621 info->track_node.track_write = kvmgt_page_track_write;
1622 info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
1623 kvm_page_track_register_notifier(kvm, &info->track_node);
1624
1625 info->debugfs_cache_entries = debugfs_create_ulong(
1626 "kvmgt_nr_cache_entries",
1627 0444, vgpu->debugfs,
1628 &vgpu->vdev.nr_cache_entries);
1629 if (!info->debugfs_cache_entries)
1630 gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");
1631
1632 return 0;
1633}
1634
1635static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1636{
1637 debugfs_remove(info->debugfs_cache_entries);
1638
1639	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1640	kvm_put_kvm(info->kvm);
1641	kvmgt_protect_table_destroy(info);
1642	gvt_cache_destroy(info->vgpu);
1643 vfree(info);
1644
1645 return true;
1646}
1647
1648static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
1649{
1650 /* nothing to do here */
1651 return 0;
1652}
1653
1654static void kvmgt_detach_vgpu(unsigned long handle)
1655{
1656 /* nothing to do here */
1657}
1658
1659static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
1660{
1661 struct kvmgt_guest_info *info;
1662 struct intel_vgpu *vgpu;
1663
1664 if (!handle_valid(handle))
1665 return -ESRCH;
1666
1667 info = (struct kvmgt_guest_info *)handle;
1668 vgpu = info->vgpu;
1669
1670	/*
1671	 * When the guest powers off, msi_trigger is set to NULL, but the
1672	 * vgpu's config and mmio registers are not restored to their
1673	 * defaults. If this vgpu is reused by the next VM, its pipes may
1674	 * still be enabled, so once the vgpu becomes active it can receive
1675	 * vblank interrupt requests. msi_trigger stays NULL until the guest
1676	 * enables MSI, so in that case return success without injecting an
1677	 * interrupt into the guest.
1678	 */
1679 if (vgpu->vdev.msi_trigger == NULL)
1680 return 0;
1681
1682 if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
1683 return 0;
1684
1685 return -EFAULT;
1686}
1687
1688static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1689{
1690	struct kvmgt_guest_info *info;
1691	kvm_pfn_t pfn;
1692
1693 if (!handle_valid(handle))
1694 return INTEL_GVT_INVALID_ADDR;
1695
1696 info = (struct kvmgt_guest_info *)handle;
1697
1698 pfn = gfn_to_pfn(info->kvm, gfn);
1699 if (is_error_noslot_pfn(pfn))
1700		return INTEL_GVT_INVALID_ADDR;
1701
1702 return pfn;
1703}
1704
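/*
 * Map a guest page range of @size bytes for DMA. A cache hit only takes
 * an additional kref and returns the existing DMA address; a cache miss
 * creates the mapping and inserts a new entry.
 */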
1705int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
1706		unsigned long size, dma_addr_t *dma_addr)
1707{
1708 struct kvmgt_guest_info *info;
1709 struct intel_vgpu *vgpu;
1710 struct gvt_dma *entry;
1711 int ret;
1712
1713 if (!handle_valid(handle))
1714 return -EINVAL;
1715
1716 info = (struct kvmgt_guest_info *)handle;
1717 vgpu = info->vgpu;
1718
1719 mutex_lock(&info->vgpu->vdev.cache_lock);
1720
1721 entry = __gvt_cache_find_gfn(info->vgpu, gfn);
1722 if (!entry) {
1723		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1724 if (ret)
1725 goto err_unlock;
1726
1727		ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
1728 if (ret)
1729 goto err_unmap;
1730 } else {
1731 kref_get(&entry->ref);
1732 *dma_addr = entry->dma_addr;
1733	}
1734
1735 mutex_unlock(&info->vgpu->vdev.cache_lock);
1736 return 0;
1737
1738err_unmap:
1739	gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
1740err_unlock:
1741 mutex_unlock(&info->vgpu->vdev.cache_lock);
1742 return ret;
1743}
1744
1745static void __gvt_dma_release(struct kref *ref)
1746{
1747 struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
1748
1749 gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
1750 entry->size);
1751 __gvt_cache_remove_entry(entry->vgpu, entry);
1752}
1753
1754void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
1755{
1756 struct kvmgt_guest_info *info;
1757 struct gvt_dma *entry;
1758
1759 if (!handle_valid(handle))
1760 return;
1761
1762 info = (struct kvmgt_guest_info *)handle;
1763
1764 mutex_lock(&info->vgpu->vdev.cache_lock);
1765 entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
1766 if (entry)
1767 kref_put(&entry->ref, __gvt_dma_release);
1768 mutex_unlock(&info->vgpu->vdev.cache_lock);
1769}
1770
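/*
 * Read or write guest physical memory through KVM. When invoked from a
 * kernel thread (current->mm == NULL), the guest's mm is temporarily
 * adopted with use_mm() for the duration of the access.
 */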
1771static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1772 void *buf, unsigned long len, bool write)
1773{
1774 struct kvmgt_guest_info *info;
1775 struct kvm *kvm;
1776	int idx, ret;
1777	bool kthread = current->mm == NULL;
1778
1779 if (!handle_valid(handle))
1780 return -ESRCH;
1781
1782 info = (struct kvmgt_guest_info *)handle;
1783 kvm = info->kvm;
1784
1785 if (kthread)
1786 use_mm(kvm->mm);
1787
1788	idx = srcu_read_lock(&kvm->srcu);
1789 ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1790 kvm_read_guest(kvm, gpa, buf, len);
1791	srcu_read_unlock(&kvm->srcu, idx);
1792
1793 if (kthread)
1794 unuse_mm(kvm->mm);
1795
1796 return ret;
1797}
1798
1799static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
1800 void *buf, unsigned long len)
1801{
1802 return kvmgt_rw_gpa(handle, gpa, buf, len, false);
1803}
1804
1805static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
1806 void *buf, unsigned long len)
1807{
1808 return kvmgt_rw_gpa(handle, gpa, buf, len, true);
1809}
1810
1811static unsigned long kvmgt_virt_to_pfn(void *addr)
1812{
1813 return PFN_DOWN(__pa(addr));
1814}
1815
1816static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
1817{
1818 struct kvmgt_guest_info *info;
1819 struct kvm *kvm;
1820
1821 if (!handle_valid(handle))
1822 return false;
1823
1824 info = (struct kvmgt_guest_info *)handle;
1825 kvm = info->kvm;
1826
1827 return kvm_is_visible_gfn(kvm, gfn);
1828
1829}
1830
1831struct intel_gvt_mpt kvmgt_mpt = {
1832 .host_init = kvmgt_host_init,
1833 .host_exit = kvmgt_host_exit,
1834 .attach_vgpu = kvmgt_attach_vgpu,
1835 .detach_vgpu = kvmgt_detach_vgpu,
1836 .inject_msi = kvmgt_inject_msi,
1837 .from_virt_to_mfn = kvmgt_virt_to_pfn,
1838 .enable_page_track = kvmgt_page_track_add,
1839 .disable_page_track = kvmgt_page_track_remove,
1840 .read_gpa = kvmgt_read_gpa,
1841 .write_gpa = kvmgt_write_gpa,
1842 .gfn_to_mfn = kvmgt_gfn_to_pfn,
1843 .dma_map_guest_page = kvmgt_dma_map_guest_page,
1844 .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
1845	.set_opregion = kvmgt_set_opregion,
1846 .get_vfio_device = kvmgt_get_vfio_device,
1847 .put_vfio_device = kvmgt_put_vfio_device,
1848	.is_valid_gfn = kvmgt_is_valid_gfn,
1849};
1850EXPORT_SYMBOL_GPL(kvmgt_mpt);
1851
1852static int __init kvmgt_init(void)
1853{
1854 return 0;
1855}
1856
1857static void __exit kvmgt_exit(void)
1858{
1859}
1860
1861module_init(kvmgt_init);
1862module_exit(kvmgt_exit);
1863
1864MODULE_LICENSE("GPL and additional rights");
1865MODULE_AUTHOR("Intel Corporation");